xref: /openbmc/linux/virt/kvm/kvm_main.c (revision b1e1296d)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * This module enables machines with Intel VT-x extensions to run virtual
 * machines without emulation or binary translation.
 *
 * Copyright (C) 2006 Qumranet, Inc.
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 *
 * Authors:
 *   Avi Kivity   <avi@qumranet.com>
 *   Yaniv Kamay  <yaniv@qumranet.com>
 */

#include <kvm/iodev.h>

#include <linux/kvm_host.h>
#include <linux/kvm.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/percpu.h>
#include <linux/mm.h>
#include <linux/miscdevice.h>
#include <linux/vmalloc.h>
#include <linux/reboot.h>
#include <linux/debugfs.h>
#include <linux/highmem.h>
#include <linux/file.h>
#include <linux/syscore_ops.h>
#include <linux/cpu.h>
#include <linux/sched/signal.h>
#include <linux/sched/mm.h>
#include <linux/sched/stat.h>
#include <linux/cpumask.h>
#include <linux/smp.h>
#include <linux/anon_inodes.h>
#include <linux/profile.h>
#include <linux/kvm_para.h>
#include <linux/pagemap.h>
#include <linux/mman.h>
#include <linux/swap.h>
#include <linux/bitops.h>
#include <linux/spinlock.h>
#include <linux/compat.h>
#include <linux/srcu.h>
#include <linux/hugetlb.h>
#include <linux/slab.h>
#include <linux/sort.h>
#include <linux/bsearch.h>
#include <linux/io.h>
#include <linux/lockdep.h>
#include <linux/kthread.h>
#include <linux/suspend.h>

#include <asm/processor.h>
#include <asm/ioctl.h>
#include <linux/uaccess.h>

#include "coalesced_mmio.h"
#include "async_pf.h"
#include "kvm_mm.h"
#include "vfio.h"

#include <trace/events/ipi.h>

#define CREATE_TRACE_POINTS
#include <trace/events/kvm.h>

#include <linux/kvm_dirty_ring.h>


/* Worst case buffer size needed for holding an integer. */
#define ITOA_MAX_LEN 12

MODULE_AUTHOR("Qumranet");
MODULE_LICENSE("GPL");

/* Architectures should define their poll value according to the halt latency */
unsigned int halt_poll_ns = KVM_HALT_POLL_NS_DEFAULT;
module_param(halt_poll_ns, uint, 0644);
EXPORT_SYMBOL_GPL(halt_poll_ns);

/* Default doubles per-vcpu halt_poll_ns. */
unsigned int halt_poll_ns_grow = 2;
module_param(halt_poll_ns_grow, uint, 0644);
EXPORT_SYMBOL_GPL(halt_poll_ns_grow);

/* The start value to grow halt_poll_ns from */
unsigned int halt_poll_ns_grow_start = 10000; /* 10us */
module_param(halt_poll_ns_grow_start, uint, 0644);
EXPORT_SYMBOL_GPL(halt_poll_ns_grow_start);

/* Default of 0 resets per-vcpu halt_poll_ns when shrinking. */
unsigned int halt_poll_ns_shrink;
module_param(halt_poll_ns_shrink, uint, 0644);
EXPORT_SYMBOL_GPL(halt_poll_ns_shrink);

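/*
 * Illustrative usage, not part of the original file: because the halt-polling
 * knobs above are registered with mode 0644, they can be inspected and tuned
 * at runtime through sysfs (paths assume KVM is built as the "kvm" module),
 * e.g.:
 *
 *	cat /sys/module/kvm/parameters/halt_poll_ns
 *	echo 200000 > /sys/module/kvm/parameters/halt_poll_ns
 *	echo 4      > /sys/module/kvm/parameters/halt_poll_ns_grow
 */
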
/*
 * Ordering of locks:
 *
 *	kvm->lock --> kvm->slots_lock --> kvm->irq_lock
 */

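/*
 * Illustrative nesting only, not part of the original file: code that needs
 * more than one of the locks above must take them outermost first, e.g.
 *
 *	mutex_lock(&kvm->lock);
 *	mutex_lock(&kvm->slots_lock);
 *	...
 *	mutex_unlock(&kvm->slots_lock);
 *	mutex_unlock(&kvm->lock);
 */
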
DEFINE_MUTEX(kvm_lock);
LIST_HEAD(vm_list);

static struct kmem_cache *kvm_vcpu_cache;

static __read_mostly struct preempt_ops kvm_preempt_ops;
static DEFINE_PER_CPU(struct kvm_vcpu *, kvm_running_vcpu);

struct dentry *kvm_debugfs_dir;
EXPORT_SYMBOL_GPL(kvm_debugfs_dir);

static const struct file_operations stat_fops_per_vm;

static struct file_operations kvm_chardev_ops;

static long kvm_vcpu_ioctl(struct file *file, unsigned int ioctl,
			   unsigned long arg);
#ifdef CONFIG_KVM_COMPAT
static long kvm_vcpu_compat_ioctl(struct file *file, unsigned int ioctl,
				  unsigned long arg);
#define KVM_COMPAT(c)	.compat_ioctl	= (c)
#else
/*
 * For architectures that don't implement a compat infrastructure,
 * adopt a double line of defense:
 * - Prevent a compat task from opening /dev/kvm
 * - If the open has been done by a 64bit task, and the KVM fd
 *   passed to a compat task, let the ioctls fail.
 */
static long kvm_no_compat_ioctl(struct file *file, unsigned int ioctl,
				unsigned long arg) { return -EINVAL; }

static int kvm_no_compat_open(struct inode *inode, struct file *file)
{
	return is_compat_task() ? -ENODEV : 0;
}
#define KVM_COMPAT(c)	.compat_ioctl	= kvm_no_compat_ioctl,	\
			.open		= kvm_no_compat_open
#endif
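
/*
 * Sketch of how KVM_COMPAT() is consumed (abridged and illustrative; the real
 * file_operations definitions appear further down in this file):
 *
 *	static struct file_operations kvm_vcpu_fops = {
 *		.release	= kvm_vcpu_release,
 *		.unlocked_ioctl	= kvm_vcpu_ioctl,
 *		KVM_COMPAT(kvm_vcpu_compat_ioctl),
 *	};
 */
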
static int hardware_enable_all(void);
static void hardware_disable_all(void);

static void kvm_io_bus_destroy(struct kvm_io_bus *bus);

#define KVM_EVENT_CREATE_VM 0
#define KVM_EVENT_DESTROY_VM 1
static void kvm_uevent_notify_change(unsigned int type, struct kvm *kvm);
static unsigned long long kvm_createvm_count;
static unsigned long long kvm_active_vms;

static DEFINE_PER_CPU(cpumask_var_t, cpu_kick_mask);

__weak void kvm_arch_guest_memory_reclaimed(struct kvm *kvm)
{
}

bool kvm_is_zone_device_page(struct page *page)
{
	/*
	 * The metadata used by is_zone_device_page() to determine whether or
	 * not a page is ZONE_DEVICE is guaranteed to be valid if and only if
	 * the device has been pinned, e.g. by get_user_pages().  WARN if the
	 * page_count() is zero to help detect bad usage of this helper.
	 */
	if (WARN_ON_ONCE(!page_count(page)))
		return false;

	return is_zone_device_page(page);
}

/*
 * Returns a 'struct page' if the pfn is "valid" and backed by a refcounted
 * page, NULL otherwise.  Note, the list of refcounted PG_reserved page types
 * is likely incomplete, it has been compiled purely through people wanting to
 * back guests with a certain type of memory and encountering issues.
 */
struct page *kvm_pfn_to_refcounted_page(kvm_pfn_t pfn)
{
	struct page *page;

	if (!pfn_valid(pfn))
		return NULL;

	page = pfn_to_page(pfn);
	if (!PageReserved(page))
		return page;

	/* The ZERO_PAGE(s) is marked PG_reserved, but is refcounted. */
	if (is_zero_pfn(pfn))
		return page;

	/*
	 * ZONE_DEVICE pages currently set PG_reserved, but from a refcounting
	 * perspective they are "normal" pages, albeit with slightly different
	 * usage rules.
	 */
	if (kvm_is_zone_device_page(page))
		return page;

	return NULL;
}

/*
 * Switches to the specified vcpu, until a matching vcpu_put().
 */
void vcpu_load(struct kvm_vcpu *vcpu)
{
	int cpu = get_cpu();

	__this_cpu_write(kvm_running_vcpu, vcpu);
	preempt_notifier_register(&vcpu->preempt_notifier);
	kvm_arch_vcpu_load(vcpu, cpu);
	put_cpu();
}
EXPORT_SYMBOL_GPL(vcpu_load);

void vcpu_put(struct kvm_vcpu *vcpu)
{
	preempt_disable();
	kvm_arch_vcpu_put(vcpu);
	preempt_notifier_unregister(&vcpu->preempt_notifier);
	__this_cpu_write(kvm_running_vcpu, NULL);
	preempt_enable();
}
EXPORT_SYMBOL_GPL(vcpu_put);

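/*
 * Illustrative pairing, not part of the original file: ioctl paths that need
 * the vCPU context loaded bracket their work with the two helpers above,
 * roughly what the KVM_RUN path does:
 *
 *	vcpu_load(vcpu);
 *	r = kvm_arch_vcpu_ioctl_run(vcpu);
 *	vcpu_put(vcpu);
 */
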
/* TODO: merge with kvm_arch_vcpu_should_kick */
static bool kvm_request_needs_ipi(struct kvm_vcpu *vcpu, unsigned req)
{
	int mode = kvm_vcpu_exiting_guest_mode(vcpu);

	/*
	 * We need to wait for the VCPU to reenable interrupts and get out of
	 * READING_SHADOW_PAGE_TABLES mode.
	 */
	if (req & KVM_REQUEST_WAIT)
		return mode != OUTSIDE_GUEST_MODE;

	/*
	 * Need to kick a running VCPU, but otherwise there is nothing to do.
	 */
	return mode == IN_GUEST_MODE;
}

static void ack_kick(void *_completed)
{
}

static inline bool kvm_kick_many_cpus(struct cpumask *cpus, bool wait)
{
	if (cpumask_empty(cpus))
		return false;

	smp_call_function_many(cpus, ack_kick, NULL, wait);
	return true;
}

static void kvm_make_vcpu_request(struct kvm_vcpu *vcpu, unsigned int req,
				  struct cpumask *tmp, int current_cpu)
{
	int cpu;

	if (likely(!(req & KVM_REQUEST_NO_ACTION)))
		__kvm_make_request(req, vcpu);

	if (!(req & KVM_REQUEST_NO_WAKEUP) && kvm_vcpu_wake_up(vcpu))
		return;

	/*
	 * Note, the vCPU could get migrated to a different pCPU at any point
	 * after kvm_request_needs_ipi(), which could result in sending an IPI
	 * to the previous pCPU.  But, that's OK because the purpose of the IPI
	 * is to ensure the vCPU returns to OUTSIDE_GUEST_MODE, which is
	 * satisfied if the vCPU migrates. Entering READING_SHADOW_PAGE_TABLES
	 * after this point is also OK, as the requirement is only that KVM wait
	 * for vCPUs that were reading SPTEs _before_ any changes were
	 * finalized. See kvm_vcpu_kick() for more details on handling requests.
	 */
	if (kvm_request_needs_ipi(vcpu, req)) {
		cpu = READ_ONCE(vcpu->cpu);
		if (cpu != -1 && cpu != current_cpu)
			__cpumask_set_cpu(cpu, tmp);
	}
}

bool kvm_make_vcpus_request_mask(struct kvm *kvm, unsigned int req,
				 unsigned long *vcpu_bitmap)
{
	struct kvm_vcpu *vcpu;
	struct cpumask *cpus;
	int i, me;
	bool called;

	me = get_cpu();

	cpus = this_cpu_cpumask_var_ptr(cpu_kick_mask);
	cpumask_clear(cpus);

	for_each_set_bit(i, vcpu_bitmap, KVM_MAX_VCPUS) {
		vcpu = kvm_get_vcpu(kvm, i);
		if (!vcpu)
			continue;
		kvm_make_vcpu_request(vcpu, req, cpus, me);
	}

	called = kvm_kick_many_cpus(cpus, !!(req & KVM_REQUEST_WAIT));
	put_cpu();

	return called;
}

bool kvm_make_all_cpus_request_except(struct kvm *kvm, unsigned int req,
				      struct kvm_vcpu *except)
{
	struct kvm_vcpu *vcpu;
	struct cpumask *cpus;
	unsigned long i;
	bool called;
	int me;

	me = get_cpu();

	cpus = this_cpu_cpumask_var_ptr(cpu_kick_mask);
	cpumask_clear(cpus);

	kvm_for_each_vcpu(i, vcpu, kvm) {
		if (vcpu == except)
			continue;
		kvm_make_vcpu_request(vcpu, req, cpus, me);
	}

	called = kvm_kick_many_cpus(cpus, !!(req & KVM_REQUEST_WAIT));
	put_cpu();

	return called;
}

bool kvm_make_all_cpus_request(struct kvm *kvm, unsigned int req)
{
	return kvm_make_all_cpus_request_except(kvm, req, NULL);
}
EXPORT_SYMBOL_GPL(kvm_make_all_cpus_request);

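/*
 * Illustrative request/consume flow, not part of the original file: a
 * requester broadcasts a request and kicks the affected vCPUs, and each vCPU
 * consumes the request before its next guest entry, e.g.
 *
 *	kvm_make_all_cpus_request(kvm, KVM_REQ_TLB_FLUSH);
 *
 * while the vCPU run loop (arch code) does something along the lines of:
 *
 *	if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu))
 *		... flush the guest TLB ...
 */
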
void kvm_flush_remote_tlbs(struct kvm *kvm)
{
	++kvm->stat.generic.remote_tlb_flush_requests;

	/*
	 * We want to publish modifications to the page tables before reading
	 * mode. Pairs with a memory barrier in arch-specific code.
	 * - x86: smp_mb__after_srcu_read_unlock in vcpu_enter_guest
	 * and smp_mb in walk_shadow_page_lockless_begin/end.
	 * - powerpc: smp_mb in kvmppc_prepare_to_enter.
	 *
	 * There is already an smp_mb__after_atomic() before
	 * kvm_make_all_cpus_request() reads vcpu->mode. We reuse that
	 * barrier here.
	 */
	if (!kvm_arch_flush_remote_tlbs(kvm)
	    || kvm_make_all_cpus_request(kvm, KVM_REQ_TLB_FLUSH))
		++kvm->stat.generic.remote_tlb_flush;
}
EXPORT_SYMBOL_GPL(kvm_flush_remote_tlbs);

void kvm_flush_remote_tlbs_range(struct kvm *kvm, gfn_t gfn, u64 nr_pages)
{
	if (!kvm_arch_flush_remote_tlbs_range(kvm, gfn, nr_pages))
		return;

	/*
	 * Fall back to flushing all TLBs if the architecture's range-based
376683412ccSMingwei Zhang 	 * TLB invalidation is unsupported or can't be performed for whatever
3776926f95aSSean Christopherson 	 * reason.
3786926f95aSSean Christopherson 	 */
3796926f95aSSean Christopherson 	kvm_flush_remote_tlbs(kvm);
3806926f95aSSean Christopherson }
3816926f95aSSean Christopherson 
kvm_flush_remote_tlbs_memslot(struct kvm * kvm,const struct kvm_memory_slot * memslot)3826926f95aSSean Christopherson void kvm_flush_remote_tlbs_memslot(struct kvm *kvm,
3836926f95aSSean Christopherson 				   const struct kvm_memory_slot *memslot)
3846926f95aSSean Christopherson {
3856926f95aSSean Christopherson 	/*
3866926f95aSSean Christopherson 	 * All current use cases for flushing the TLBs for a specific memslot
3876926f95aSSean Christopherson 	 * are related to dirty logging, and many do the TLB flush out of
	 * mmu_lock. The interaction between the various operations on the memslot
	 * must be serialized by slots_lock to ensure the TLB flush from one
	 * operation is observed by any other operation on the same memslot.
	 */
	lockdep_assert_held(&kvm->slots_lock);
	kvm_flush_remote_tlbs_range(kvm, memslot->base_gfn, memslot->npages);
}

static void kvm_flush_shadow_all(struct kvm *kvm)
{
	kvm_arch_flush_shadow_all(kvm);
	kvm_arch_guest_memory_reclaimed(kvm);
}

#ifdef KVM_ARCH_NR_OBJS_PER_MEMORY_CACHE
static inline void *mmu_memory_cache_alloc_obj(struct kvm_mmu_memory_cache *mc,
					       gfp_t gfp_flags)
{
	gfp_flags |= mc->gfp_zero;

	if (mc->kmem_cache)
		return kmem_cache_alloc(mc->kmem_cache, gfp_flags);
	else
		return (void *)__get_free_page(gfp_flags);
}

int __kvm_mmu_topup_memory_cache(struct kvm_mmu_memory_cache *mc, int capacity, int min)
{
	gfp_t gfp = mc->gfp_custom ? mc->gfp_custom : GFP_KERNEL_ACCOUNT;
	void *obj;

	if (mc->nobjs >= min)
		return 0;

	if (unlikely(!mc->objects)) {
		if (WARN_ON_ONCE(!capacity))
			return -EIO;

		mc->objects = kvmalloc_array(sizeof(void *), capacity, gfp);
		if (!mc->objects)
			return -ENOMEM;

		mc->capacity = capacity;
	}

	/* It is illegal to request a different capacity across topups. */
	if (WARN_ON_ONCE(mc->capacity != capacity))
		return -EIO;

	while (mc->nobjs < mc->capacity) {
		obj = mmu_memory_cache_alloc_obj(mc, gfp);
		if (!obj)
			return mc->nobjs >= min ? 0 : -ENOMEM;
		mc->objects[mc->nobjs++] = obj;
	}
	return 0;
}

int kvm_mmu_topup_memory_cache(struct kvm_mmu_memory_cache *mc, int min)
{
	return __kvm_mmu_topup_memory_cache(mc, KVM_ARCH_NR_OBJS_PER_MEMORY_CACHE, min);
}

int kvm_mmu_memory_cache_nr_free_objects(struct kvm_mmu_memory_cache *mc)
{
	return mc->nobjs;
}

void kvm_mmu_free_memory_cache(struct kvm_mmu_memory_cache *mc)
{
	while (mc->nobjs) {
		if (mc->kmem_cache)
			kmem_cache_free(mc->kmem_cache, mc->objects[--mc->nobjs]);
		else
			free_page((unsigned long)mc->objects[--mc->nobjs]);
	}

	kvfree(mc->objects);

	mc->objects = NULL;
	mc->capacity = 0;
}

void *kvm_mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc)
{
	void *p;

	if (WARN_ON(!mc->nobjs))
		p = mmu_memory_cache_alloc_obj(mc, GFP_ATOMIC | __GFP_ACCOUNT);
	else
		p = mc->objects[--mc->nobjs];
	BUG_ON(!p);
	return p;
}
#endif

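/*
 * Illustrative usage of the cache above, not part of the original file: arch
 * code tops the cache up in a context that may sleep and then allocates from
 * it while holding mmu_lock, where sleeping is not allowed (local variable
 * names are assumptions), e.g.
 *
 *	struct kvm_mmu_memory_cache cache = { .gfp_zero = __GFP_ZERO };
 *
 *	r = kvm_mmu_topup_memory_cache(&cache, nr_objs_needed);
 *	...
 *	KVM_MMU_LOCK(kvm);
 *	pt = kvm_mmu_memory_cache_alloc(&cache);
 *	KVM_MMU_UNLOCK(kvm);
 *	...
 *	kvm_mmu_free_memory_cache(&cache);
 */
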
static void kvm_vcpu_init(struct kvm_vcpu *vcpu, struct kvm *kvm, unsigned id)
{
	mutex_init(&vcpu->mutex);
	vcpu->cpu = -1;
	vcpu->kvm = kvm;
	vcpu->vcpu_id = id;
	vcpu->pid = NULL;
#ifndef __KVM_HAVE_ARCH_WQP
	rcuwait_init(&vcpu->wait);
#endif
	kvm_async_pf_vcpu_init(vcpu);

	kvm_vcpu_set_in_spin_loop(vcpu, false);
	kvm_vcpu_set_dy_eligible(vcpu, false);
	vcpu->preempted = false;
	vcpu->ready = false;
	preempt_notifier_init(&vcpu->preempt_notifier, &kvm_preempt_ops);
	vcpu->last_used_slot = NULL;

	/* Fill the stats id string for the vcpu */
	snprintf(vcpu->stats_id, sizeof(vcpu->stats_id), "kvm-%d/vcpu-%d",
		 task_pid_nr(current), id);
}

static void kvm_vcpu_destroy(struct kvm_vcpu *vcpu)
{
	kvm_arch_vcpu_destroy(vcpu);
	kvm_dirty_ring_free(&vcpu->dirty_ring);

	/*
	 * No need for rcu_read_lock as VCPU_RUN is the only place that changes
	 * the vcpu->pid pointer, and at destruction time all file descriptors
	 * are already gone.
	 */
	put_pid(rcu_dereference_protected(vcpu->pid, 1));

	free_page((unsigned long)vcpu->run);
	kmem_cache_free(kvm_vcpu_cache, vcpu);
}

void kvm_destroy_vcpus(struct kvm *kvm)
{
	unsigned long i;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(i, vcpu, kvm) {
		kvm_vcpu_destroy(vcpu);
		xa_erase(&kvm->vcpu_array, i);
	}

	atomic_set(&kvm->online_vcpus, 0);
}
EXPORT_SYMBOL_GPL(kvm_destroy_vcpus);

#if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER)
static inline struct kvm *mmu_notifier_to_kvm(struct mmu_notifier *mn)
{
	return container_of(mn, struct kvm, mmu_notifier);
}

typedef bool (*hva_handler_t)(struct kvm *kvm, struct kvm_gfn_range *range);

typedef void (*on_lock_fn_t)(struct kvm *kvm, unsigned long start,
			     unsigned long end);

typedef void (*on_unlock_fn_t)(struct kvm *kvm);

struct kvm_hva_range {
	unsigned long start;
	unsigned long end;
	union kvm_mmu_notifier_arg arg;
	hva_handler_t handler;
	on_lock_fn_t on_lock;
	on_unlock_fn_t on_unlock;
	bool flush_on_ret;
	bool may_block;
};

/*
 * Use a dedicated stub instead of NULL to indicate that there is no callback
 * function/handler.  The compiler technically can't guarantee that a real
 * function will have a non-zero address, and so it will generate code to
 * check for !NULL, whereas comparing against a stub will be elided at compile
 * time (unless the compiler is getting long in the tooth, e.g. gcc 4.9).
 */
static void kvm_null_fn(void)
{

}
#define IS_KVM_NULL_FN(fn) ((fn) == (void *)kvm_null_fn)

static const union kvm_mmu_notifier_arg KVM_MMU_NOTIFIER_NO_ARG;

/* Iterate over each memslot intersecting [start, last] (inclusive) range */
#define kvm_for_each_memslot_in_hva_range(node, slots, start, last)	     \
	for (node = interval_tree_iter_first(&slots->hva_tree, start, last); \
	     node;							     \
	     node = interval_tree_iter_next(node, start, last))	     \

static __always_inline int __kvm_handle_hva_range(struct kvm *kvm,
						  const struct kvm_hva_range *range)
{
	bool ret = false, locked = false;
	struct kvm_gfn_range gfn_range;
	struct kvm_memory_slot *slot;
	struct kvm_memslots *slots;
	int i, idx;

	if (WARN_ON_ONCE(range->end <= range->start))
		return 0;

	/* A null handler is allowed if and only if on_lock() is provided. */
	if (WARN_ON_ONCE(IS_KVM_NULL_FN(range->on_lock) &&
			 IS_KVM_NULL_FN(range->handler)))
		return 0;

	idx = srcu_read_lock(&kvm->srcu);

	for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) {
		struct interval_tree_node *node;

		slots = __kvm_memslots(kvm, i);
		kvm_for_each_memslot_in_hva_range(node, slots,
						  range->start, range->end - 1) {
			unsigned long hva_start, hva_end;

			slot = container_of(node, struct kvm_memory_slot, hva_node[slots->node_idx]);
			hva_start = max(range->start, slot->userspace_addr);
			hva_end = min(range->end, slot->userspace_addr +
						  (slot->npages << PAGE_SHIFT));

			/*
			 * To optimize for the likely case where the address
			 * range is covered by zero or one memslots, don't
			 * bother making these conditional (to avoid writes on
			 * the second or later invocation of the handler).
			 */
			gfn_range.arg = range->arg;
			gfn_range.may_block = range->may_block;

			/*
			 * {gfn(page) | page intersects with [hva_start, hva_end)} =
			 * {gfn_start, gfn_start+1, ..., gfn_end-1}.
			 */
			gfn_range.start = hva_to_gfn_memslot(hva_start, slot);
			gfn_range.end = hva_to_gfn_memslot(hva_end + PAGE_SIZE - 1, slot);
			gfn_range.slot = slot;

			if (!locked) {
				locked = true;
				KVM_MMU_LOCK(kvm);
				if (!IS_KVM_NULL_FN(range->on_lock))
					range->on_lock(kvm, range->start, range->end);
				if (IS_KVM_NULL_FN(range->handler))
					break;
			}
			ret |= range->handler(kvm, &gfn_range);
		}
	}

	if (range->flush_on_ret && ret)
		kvm_flush_remote_tlbs(kvm);

	if (locked) {
		KVM_MMU_UNLOCK(kvm);
		if (!IS_KVM_NULL_FN(range->on_unlock))
			range->on_unlock(kvm);
	}

	srcu_read_unlock(&kvm->srcu, idx);

	/* The notifiers are averse to booleans. :-( */
	return (int)ret;
}

static __always_inline int kvm_handle_hva_range(struct mmu_notifier *mn,
						unsigned long start,
						unsigned long end,
						union kvm_mmu_notifier_arg arg,
						hva_handler_t handler)
{
	struct kvm *kvm = mmu_notifier_to_kvm(mn);
	const struct kvm_hva_range range = {
		.start		= start,
		.end		= end,
		.arg		= arg,
		.handler	= handler,
		.on_lock	= (void *)kvm_null_fn,
		.on_unlock	= (void *)kvm_null_fn,
		.flush_on_ret	= true,
		.may_block	= false,
	};

	return __kvm_handle_hva_range(kvm, &range);
}

static __always_inline int kvm_handle_hva_range_no_flush(struct mmu_notifier *mn,
							 unsigned long start,
							 unsigned long end,
							 hva_handler_t handler)
{
	struct kvm *kvm = mmu_notifier_to_kvm(mn);
	const struct kvm_hva_range range = {
		.start		= start,
		.end		= end,
		.handler	= handler,
		.on_lock	= (void *)kvm_null_fn,
		.on_unlock	= (void *)kvm_null_fn,
		.flush_on_ret	= false,
		.may_block	= false,
	};

	return __kvm_handle_hva_range(kvm, &range);
}

static bool kvm_change_spte_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
{
	/*
	 * Skipping invalid memslots is correct if and only if change_pte() is
	 * surrounded by invalidate_range_{start,end}(), which is currently
	 * guaranteed by the primary MMU.  If that ever changes, KVM needs to
	 * unmap the memslot instead of skipping the memslot to ensure that KVM
	 * doesn't hold references to the old PFN.
	 */
	WARN_ON_ONCE(!READ_ONCE(kvm->mn_active_invalidate_count));

	if (range->slot->flags & KVM_MEMSLOT_INVALID)
		return false;

	return kvm_set_spte_gfn(kvm, range);
}

static void kvm_mmu_notifier_change_pte(struct mmu_notifier *mn,
					struct mm_struct *mm,
					unsigned long address,
					pte_t pte)
{
	struct kvm *kvm = mmu_notifier_to_kvm(mn);
	const union kvm_mmu_notifier_arg arg = { .pte = pte };

	trace_kvm_set_spte_hva(address);

	/*
	 * .change_pte() must be surrounded by .invalidate_range_{start,end}().
	 * If mmu_invalidate_in_progress is zero, then no in-progress
	 * invalidations, including this one, found a relevant memslot at
	 * start(); rechecking memslots here is unnecessary.  Note, a false
	 * positive (count elevated by a different invalidation) is sub-optimal
	 * but functionally ok.
	 */
	WARN_ON_ONCE(!READ_ONCE(kvm->mn_active_invalidate_count));
	if (!READ_ONCE(kvm->mmu_invalidate_in_progress))
		return;

	kvm_handle_hva_range(mn, address, address + 1, arg, kvm_change_spte_gfn);
}

void kvm_mmu_invalidate_begin(struct kvm *kvm, unsigned long start,
			      unsigned long end)
{
	/*
	 * The count increase must become visible at unlock time as no
	 * spte can be established without taking the mmu_lock and
	 * count is also read inside the mmu_lock critical section.
	 */
	kvm->mmu_invalidate_in_progress++;
	if (likely(kvm->mmu_invalidate_in_progress == 1)) {
		kvm->mmu_invalidate_range_start = start;
		kvm->mmu_invalidate_range_end = end;
	} else {
		/*
		 * Fully tracking multiple concurrent ranges has diminishing
		 * returns. Keep things simple and just find the minimal range
		 * which includes the current and new ranges. As there won't be
		 * enough information to subtract a range after its invalidate
		 * completes, any ranges invalidated concurrently will
		 * accumulate and persist until all outstanding invalidates
		 * complete.
		 */
		kvm->mmu_invalidate_range_start =
			min(kvm->mmu_invalidate_range_start, start);
		kvm->mmu_invalidate_range_end =
			max(kvm->mmu_invalidate_range_end, end);
	}
}

static int kvm_mmu_notifier_invalidate_range_start(struct mmu_notifier *mn,
					const struct mmu_notifier_range *range)
{
	struct kvm *kvm = mmu_notifier_to_kvm(mn);
	const struct kvm_hva_range hva_range = {
		.start		= range->start,
		.end		= range->end,
		.handler	= kvm_unmap_gfn_range,
		.on_lock	= kvm_mmu_invalidate_begin,
		.on_unlock	= kvm_arch_guest_memory_reclaimed,
		.flush_on_ret	= true,
		.may_block	= mmu_notifier_range_blockable(range),
	};

	trace_kvm_unmap_hva_range(range->start, range->end);

	/*
	 * Prevent memslot modification between range_start() and range_end()
	 * so that conditionally locking provides the same result in both
	 * functions.  Without that guarantee, the mmu_invalidate_in_progress
	 * adjustments will be imbalanced.
	 *
	 * Pairs with the decrement in range_end().
	 */
	spin_lock(&kvm->mn_invalidate_lock);
	kvm->mn_active_invalidate_count++;
	spin_unlock(&kvm->mn_invalidate_lock);

	/*
	 * Invalidate pfn caches _before_ invalidating the secondary MMUs, i.e.
	 * before acquiring mmu_lock, to avoid holding mmu_lock while acquiring
	 * each cache's lock.  There are relatively few caches in existence at
	 * any given time, and the caches themselves can check for hva overlap,
	 * i.e. don't need to rely on memslot overlap checks for performance.
	 * Because this runs without holding mmu_lock, the pfn caches must use
	 * mn_active_invalidate_count (see above) instead of
	 * mmu_invalidate_in_progress.
	 */
	gfn_to_pfn_cache_invalidate_start(kvm, range->start, range->end,
					  hva_range.may_block);

	__kvm_handle_hva_range(kvm, &hva_range);

	return 0;
}

void kvm_mmu_invalidate_end(struct kvm *kvm, unsigned long start,
			    unsigned long end)
{
	/*
	 * This sequence increase will notify the kvm page fault that
	 * the page that is going to be mapped in the spte could have
	 * been freed.
	 */
	kvm->mmu_invalidate_seq++;
	smp_wmb();
	/*
	 * The above sequence increase must be visible before the
	 * below count decrease, which is ensured by the smp_wmb above
	 * in conjunction with the smp_rmb in mmu_invalidate_retry().
	 */
	kvm->mmu_invalidate_in_progress--;
}

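/*
 * Illustrative consumer of mmu_invalidate_seq, not part of the original file:
 * arch page fault handlers typically snapshot the sequence count before
 * resolving the host pfn and then, under mmu_lock, retry the fault if an
 * invalidation raced with them, along the lines of:
 *
 *	mmu_seq = kvm->mmu_invalidate_seq;
 *	smp_rmb();
 *	pfn = ...;			resolve the translation, may sleep
 *	KVM_MMU_LOCK(kvm);
 *	if (mmu_invalidate_retry(kvm, mmu_seq))
 *		goto out_unlock;	an invalidation ran, redo the fault
 *	... install the mapping ...
 *	KVM_MMU_UNLOCK(kvm);
 */
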
kvm_mmu_notifier_invalidate_range_end(struct mmu_notifier * mn,const struct mmu_notifier_range * range)83452ac8b35SPaolo Bonzini static void kvm_mmu_notifier_invalidate_range_end(struct mmu_notifier *mn,
83552ac8b35SPaolo Bonzini 					const struct mmu_notifier_range *range)
83652ac8b35SPaolo Bonzini {
83752ac8b35SPaolo Bonzini 	struct kvm *kvm = mmu_notifier_to_kvm(mn);
83820ec3ebdSChao Peng 	const struct kvm_hva_range hva_range = {
839e930bffeSAndrea Arcangeli 		.start		= range->start,
840e930bffeSAndrea Arcangeli 		.end		= range->end,
841e930bffeSAndrea Arcangeli 		.handler	= (void *)kvm_null_fn,
842e930bffeSAndrea Arcangeli 		.on_lock	= kvm_mmu_invalidate_end,
84357128468SAndres Lagar-Cavilla 		.on_unlock	= (void *)kvm_null_fn,
84457128468SAndres Lagar-Cavilla 		.flush_on_ret	= false,
845e930bffeSAndrea Arcangeli 		.may_block	= mmu_notifier_range_blockable(range),
846501b9185SSean Christopherson 	};
847501b9185SSean Christopherson 	bool wake;
8483039bcc7SSean Christopherson 
849e930bffeSAndrea Arcangeli 	__kvm_handle_hva_range(kvm, &hva_range);
850e930bffeSAndrea Arcangeli 
8511d7715c6SVladimir Davydov 	/* Pairs with the increment in range_start(). */
8521d7715c6SVladimir Davydov 	spin_lock(&kvm->mn_invalidate_lock);
8531d7715c6SVladimir Davydov 	wake = (--kvm->mn_active_invalidate_count == 0);
8541d7715c6SVladimir Davydov 	spin_unlock(&kvm->mn_invalidate_lock);
8551d7715c6SVladimir Davydov 
856501b9185SSean Christopherson 	/*
857501b9185SSean Christopherson 	 * There can only be one waiter, since the wait happens under
8581d7715c6SVladimir Davydov 	 * slots_lock.
8591d7715c6SVladimir Davydov 	 */
8601d7715c6SVladimir Davydov 	if (wake)
8611d7715c6SVladimir Davydov 		rcuwait_wake_up(&kvm->mn_memslots_update_rcuwait);
8621d7715c6SVladimir Davydov 
8631d7715c6SVladimir Davydov 	BUG_ON(kvm->mmu_invalidate_in_progress < 0);
8641d7715c6SVladimir Davydov }
8651d7715c6SVladimir Davydov 
kvm_mmu_notifier_clear_flush_young(struct mmu_notifier * mn,struct mm_struct * mm,unsigned long start,unsigned long end)8661d7715c6SVladimir Davydov static int kvm_mmu_notifier_clear_flush_young(struct mmu_notifier *mn,
8671d7715c6SVladimir Davydov 					      struct mm_struct *mm,
8681d7715c6SVladimir Davydov 					      unsigned long start,
8691d7715c6SVladimir Davydov 					      unsigned long end)
8701d7715c6SVladimir Davydov {
8713039bcc7SSean Christopherson 	trace_kvm_age_hva(start, end);
8721d7715c6SVladimir Davydov 
8731d7715c6SVladimir Davydov 	return kvm_handle_hva_range(mn, start, end, KVM_MMU_NOTIFIER_NO_ARG,
8748ee53820SAndrea Arcangeli 				    kvm_age_gfn);
8758ee53820SAndrea Arcangeli }
8768ee53820SAndrea Arcangeli 
kvm_mmu_notifier_clear_young(struct mmu_notifier * mn,struct mm_struct * mm,unsigned long start,unsigned long end)8778ee53820SAndrea Arcangeli static int kvm_mmu_notifier_clear_young(struct mmu_notifier *mn,
878501b9185SSean Christopherson 					struct mm_struct *mm,
879501b9185SSean Christopherson 					unsigned long start,
8803039bcc7SSean Christopherson 					unsigned long end)
8813039bcc7SSean Christopherson {
8828ee53820SAndrea Arcangeli 	trace_kvm_age_hva(start, end);
8838ee53820SAndrea Arcangeli 
88485db06e5SMarcelo Tosatti 	/*
88585db06e5SMarcelo Tosatti 	 * Even though we do not flush TLB, this will still adversely
88685db06e5SMarcelo Tosatti 	 * affect performance on pre-Haswell Intel EPT, where there is
88785db06e5SMarcelo Tosatti 	 * no EPT Access Bit to clear so that we have to tear down EPT
888eda2bedaSLai Jiangshan 	 * tables instead. If we find this unacceptable, we can always
889eda2bedaSLai Jiangshan 	 * add a parameter to kvm_age_hva so that it effectively doesn't
890eda2bedaSLai Jiangshan 	 * do anything on clear_young.
891683412ccSMingwei Zhang 	 *
892eda2bedaSLai Jiangshan 	 * Also note that currently we never issue secondary TLB flushes
89385db06e5SMarcelo Tosatti 	 * from clear_young, leaving this job up to the regular system
89485db06e5SMarcelo Tosatti 	 * cadence. If we find this inaccurate, we might come up with a
895e930bffeSAndrea Arcangeli 	 * more sophisticated heuristic later.
896e930bffeSAndrea Arcangeli 	 */
897e930bffeSAndrea Arcangeli 	return kvm_handle_hva_range_no_flush(mn, start, end, kvm_age_gfn);
898e930bffeSAndrea Arcangeli }
8991d7715c6SVladimir Davydov 
kvm_mmu_notifier_test_young(struct mmu_notifier * mn,struct mm_struct * mm,unsigned long address)9008ee53820SAndrea Arcangeli static int kvm_mmu_notifier_test_young(struct mmu_notifier *mn,
9013da0dd43SIzik Eidus 				       struct mm_struct *mm,
90285db06e5SMarcelo Tosatti 				       unsigned long address)
903e930bffeSAndrea Arcangeli {
9044c07b0a4SAvi Kivity 	trace_kvm_test_age_hva(address);
9054c07b0a4SAvi Kivity 
9064c07b0a4SAvi Kivity 	return kvm_handle_hva_range_no_flush(mn, address, address + 1,
9074c07b0a4SAvi Kivity 					     kvm_test_age_gfn);
9084c07b0a4SAvi Kivity }
9094c07b0a4SAvi Kivity 
kvm_mmu_notifier_release(struct mmu_notifier * mn,struct mm_struct * mm)9104c07b0a4SAvi Kivity static void kvm_mmu_notifier_release(struct mmu_notifier *mn,
9114c07b0a4SAvi Kivity 				     struct mm_struct *mm)
9124c07b0a4SAvi Kivity {
9134c07b0a4SAvi Kivity 	struct kvm *kvm = mmu_notifier_to_kvm(mn);
9144c07b0a4SAvi Kivity 	int idx;
9154c07b0a4SAvi Kivity 
9164c07b0a4SAvi Kivity 	idx = srcu_read_lock(&kvm->srcu);
9174c07b0a4SAvi Kivity 	kvm_flush_shadow_all(kvm);
918e930bffeSAndrea Arcangeli 	srcu_read_unlock(&kvm->srcu, idx);
919e930bffeSAndrea Arcangeli }
9202fdef3a2SSergey Senozhatsky 
9212fdef3a2SSergey Senozhatsky static const struct mmu_notifier_ops kvm_mmu_notifier_ops = {
9222fdef3a2SSergey Senozhatsky 	.invalidate_range_start	= kvm_mmu_notifier_invalidate_range_start,
9232fdef3a2SSergey Senozhatsky 	.invalidate_range_end	= kvm_mmu_notifier_invalidate_range_end,
9242fdef3a2SSergey Senozhatsky 	.clear_flush_young	= kvm_mmu_notifier_clear_flush_young,
9252fdef3a2SSergey Senozhatsky 	.clear_young		= kvm_mmu_notifier_clear_young,
9262fdef3a2SSergey Senozhatsky 	.test_young		= kvm_mmu_notifier_test_young,
9272fdef3a2SSergey Senozhatsky 	.change_pte		= kvm_mmu_notifier_change_pte,
9282fdef3a2SSergey Senozhatsky 	.release		= kvm_mmu_notifier_release,
9292fdef3a2SSergey Senozhatsky };
9302fdef3a2SSergey Senozhatsky 
kvm_init_mmu_notifier(struct kvm * kvm)9312fdef3a2SSergey Senozhatsky static int kvm_init_mmu_notifier(struct kvm *kvm)
9322fdef3a2SSergey Senozhatsky {
9332fdef3a2SSergey Senozhatsky 	kvm->mmu_notifier.ops = &kvm_mmu_notifier_ops;
9342fdef3a2SSergey Senozhatsky 	return mmu_notifier_register(&kvm->mmu_notifier, current->mm);
9352fdef3a2SSergey Senozhatsky }
9362fdef3a2SSergey Senozhatsky 
9372fdef3a2SSergey Senozhatsky #else  /* !(CONFIG_MMU_NOTIFIER && KVM_ARCH_WANT_MMU_NOTIFIER) */
9382fdef3a2SSergey Senozhatsky 
kvm_init_mmu_notifier(struct kvm * kvm)9392fdef3a2SSergey Senozhatsky static int kvm_init_mmu_notifier(struct kvm *kvm)
9402fdef3a2SSergey Senozhatsky {
9412fdef3a2SSergey Senozhatsky 	return 0;
9422fdef3a2SSergey Senozhatsky }
9432fdef3a2SSergey Senozhatsky 
9442fdef3a2SSergey Senozhatsky #endif /* CONFIG_MMU_NOTIFIER && KVM_ARCH_WANT_MMU_NOTIFIER */
9452fdef3a2SSergey Senozhatsky 
9462fdef3a2SSergey Senozhatsky #ifdef CONFIG_HAVE_KVM_PM_NOTIFIER
kvm_pm_notifier_call(struct notifier_block * bl,unsigned long state,void * unused)9472fdef3a2SSergey Senozhatsky static int kvm_pm_notifier_call(struct notifier_block *bl,
9482fdef3a2SSergey Senozhatsky 				unsigned long state,
9492fdef3a2SSergey Senozhatsky 				void *unused)
9502fdef3a2SSergey Senozhatsky {
9512fdef3a2SSergey Senozhatsky 	struct kvm *kvm = container_of(bl, struct kvm, pm_notifier);
952a47d2b07SPaolo Bonzini 
953a47d2b07SPaolo Bonzini 	return kvm_arch_pm_notifier(kvm, state);
954a47d2b07SPaolo Bonzini }
955a47d2b07SPaolo Bonzini 
kvm_init_pm_notifier(struct kvm * kvm)956a47d2b07SPaolo Bonzini static void kvm_init_pm_notifier(struct kvm *kvm)
957a47d2b07SPaolo Bonzini {
958a47d2b07SPaolo Bonzini 	kvm->pm_notifier.notifier_call = kvm_pm_notifier_call;
959a47d2b07SPaolo Bonzini 	/* Suspend KVM before we suspend ftrace, RCU, etc. */
960a47d2b07SPaolo Bonzini 	kvm->pm_notifier.priority = INT_MAX;
961a54d8066SMaciej S. Szmigiero 	register_pm_notifier(&kvm->pm_notifier);
962e96c81eeSSean Christopherson }
963a47d2b07SPaolo Bonzini 
964e96c81eeSSean Christopherson static void kvm_destroy_pm_notifier(struct kvm *kvm)
965a47d2b07SPaolo Bonzini {
966e96c81eeSSean Christopherson 	unregister_pm_notifier(&kvm->pm_notifier);
967a47d2b07SPaolo Bonzini }
968a54d8066SMaciej S. Szmigiero #else /* !CONFIG_HAVE_KVM_PM_NOTIFIER */
969a47d2b07SPaolo Bonzini static void kvm_init_pm_notifier(struct kvm *kvm)
970a47d2b07SPaolo Bonzini {
971a47d2b07SPaolo Bonzini }
972a47d2b07SPaolo Bonzini 
973a54d8066SMaciej S. Szmigiero static void kvm_destroy_pm_notifier(struct kvm *kvm)
974a47d2b07SPaolo Bonzini {
975a54d8066SMaciej S. Szmigiero }
976a47d2b07SPaolo Bonzini #endif /* CONFIG_HAVE_KVM_PM_NOTIFIER */
977a54d8066SMaciej S. Szmigiero 
978a54d8066SMaciej S. Szmigiero static void kvm_destroy_dirty_bitmap(struct kvm_memory_slot *memslot)
979a54d8066SMaciej S. Szmigiero {
980a54d8066SMaciej S. Szmigiero 	if (!memslot->dirty_bitmap)
981a54d8066SMaciej S. Szmigiero 		return;
982a54d8066SMaciej S. Szmigiero 
983a54d8066SMaciej S. Szmigiero 	kvfree(memslot->dirty_bitmap);
984a47d2b07SPaolo Bonzini 	memslot->dirty_bitmap = NULL;
985a47d2b07SPaolo Bonzini }
986a54d8066SMaciej S. Szmigiero 
987e96c81eeSSean Christopherson /* This does not remove the slot from struct kvm_memslots data structures */
988bf3e05bcSXiao Guangrong static void kvm_free_memslot(struct kvm *kvm, struct kvm_memory_slot *slot)
989bf3e05bcSXiao Guangrong {
990bc9e9e67SJing Zhang 	kvm_destroy_dirty_bitmap(slot);
991bc9e9e67SJing Zhang 
992bc9e9e67SJing Zhang 	kvm_arch_free_memslot(kvm, slot);
993bc9e9e67SJing Zhang 
994bc9e9e67SJing Zhang 	kfree(slot);
995bc9e9e67SJing Zhang }
996bc9e9e67SJing Zhang 
997bc9e9e67SJing Zhang static void kvm_free_memslots(struct kvm *kvm, struct kvm_memslots *slots)
998bc9e9e67SJing Zhang {
999bc9e9e67SJing Zhang 	struct hlist_node *idnode;
1000bc9e9e67SJing Zhang 	struct kvm_memory_slot *memslot;
1001bc9e9e67SJing Zhang 	int bkt;
1002bc9e9e67SJing Zhang 
1003536a6f88SJanosch Frank 	/*
1004536a6f88SJanosch Frank 	 * The same memslot objects live in both active and inactive sets,
1005536a6f88SJanosch Frank 	 * arbitrarily free using index '1' so the second invocation of this
1006bc9e9e67SJing Zhang 	 * function isn't operating over a structure with dangling pointers
1007bc9e9e67SJing Zhang 	 * (even though this function isn't actually touching them).
1008536a6f88SJanosch Frank 	 */
1009a44a4cc1SOliver Upton 	if (!slots->node_idx)
1010536a6f88SJanosch Frank 		return;
1011536a6f88SJanosch Frank 
1012536a6f88SJanosch Frank 	hash_for_each_safe(slots->id_hash, bkt, idnode, memslot, id_node[1])
1013536a6f88SJanosch Frank 		kvm_free_memslot(kvm, memslot);
10149d5a1dceSLuiz Capitulino }
1015536a6f88SJanosch Frank 
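/*
 * Map a stat descriptor to its debugfs file mode: instantaneous stats are
 * exposed read-only, while cumulative and peak stats are also writable so
 * they can be cleared from userspace.
 */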
1016536a6f88SJanosch Frank static umode_t kvm_stats_debugfs_mode(const struct _kvm_stats_desc *pdesc)
1017536a6f88SJanosch Frank {
1018536a6f88SJanosch Frank 	switch (pdesc->desc.flags & KVM_STATS_TYPE_MASK) {
10199d5a1dceSLuiz Capitulino 	case KVM_STATS_TYPE_INSTANT:
1020536a6f88SJanosch Frank 		return 0444;
102159f82aadSOliver Upton 	case KVM_STATS_TYPE_CUMULATIVE:
1022536a6f88SJanosch Frank 	case KVM_STATS_TYPE_PEAK:
102385cd39afSPaolo Bonzini 	default:
102485cd39afSPaolo Bonzini 		return 0644;
1025536a6f88SJanosch Frank 	}
1026536a6f88SJanosch Frank }
1027bc9e9e67SJing Zhang 
1028b74ed7a6SOliver Upton 
1029bc9e9e67SJing Zhang static void kvm_destroy_vm_debugfs(struct kvm *kvm)
1030bc9e9e67SJing Zhang {
1031536a6f88SJanosch Frank 	int i;
1032536a6f88SJanosch Frank 	int kvm_debugfs_num_entries = kvm_vm_stats_header.num_desc +
1033536a6f88SJanosch Frank 				      kvm_vcpu_stats_header.num_desc;
1034536a6f88SJanosch Frank 
103559f82aadSOliver Upton 	if (IS_ERR(kvm->debugfs_dentry))
103685cd39afSPaolo Bonzini 		return;
103785cd39afSPaolo Bonzini 
103885cd39afSPaolo Bonzini 	debugfs_remove_recursive(kvm->debugfs_dentry);
103985cd39afSPaolo Bonzini 
104085cd39afSPaolo Bonzini 	if (kvm->debugfs_stat_data) {
104185cd39afSPaolo Bonzini 		for (i = 0; i < kvm_debugfs_num_entries; i++)
104285cd39afSPaolo Bonzini 			kfree(kvm->debugfs_stat_data[i]);
104385cd39afSPaolo Bonzini 		kfree(kvm->debugfs_stat_data);
104485cd39afSPaolo Bonzini 	}
104585cd39afSPaolo Bonzini }
104685cd39afSPaolo Bonzini 
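/*
 * Create the per-VM debugfs directory, named "<pid>-<fd name>", and populate
 * it with one file per VM-level and per-vCPU stat descriptor.
 */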
104785cd39afSPaolo Bonzini static int kvm_create_vm_debugfs(struct kvm *kvm, const char *fdname)
1048536a6f88SJanosch Frank {
104985cd39afSPaolo Bonzini 	static DEFINE_MUTEX(kvm_debugfs_lock);
1050536a6f88SJanosch Frank 	struct dentry *dent;
1051536a6f88SJanosch Frank 	char dir_name[ITOA_MAX_LEN * 2];
1052b12ce36aSBen Gardon 	struct kvm_stat_data *stat_data;
1053536a6f88SJanosch Frank 	const struct _kvm_stats_desc *pdesc;
1054b74ed7a6SOliver Upton 	int i, ret = -ENOMEM;
1055536a6f88SJanosch Frank 	int kvm_debugfs_num_entries = kvm_vm_stats_header.num_desc +
1056bc9e9e67SJing Zhang 				      kvm_vcpu_stats_header.num_desc;
1057bc9e9e67SJing Zhang 
1058b12ce36aSBen Gardon 	if (!debugfs_initialized())
1059536a6f88SJanosch Frank 		return 0;
1060b74ed7a6SOliver Upton 
1061536a6f88SJanosch Frank 	snprintf(dir_name, sizeof(dir_name), "%d-%s", task_pid_nr(current), fdname);
1062536a6f88SJanosch Frank 	mutex_lock(&kvm_debugfs_lock);
1063bc9e9e67SJing Zhang 	dent = debugfs_lookup(dir_name, kvm_debugfs_dir);
1064bc9e9e67SJing Zhang 	if (dent) {
1065bc9e9e67SJing Zhang 		pr_warn_ratelimited("KVM: debugfs: duplicate directory %s\n", dir_name);
1066bc9e9e67SJing Zhang 		dput(dent);
1067bc9e9e67SJing Zhang 		mutex_unlock(&kvm_debugfs_lock);
1068bc9e9e67SJing Zhang 		return 0;
1069bc9e9e67SJing Zhang 	}
1070bc9e9e67SJing Zhang 	dent = debugfs_create_dir(dir_name, kvm_debugfs_dir);
1071bc9e9e67SJing Zhang 	mutex_unlock(&kvm_debugfs_lock);
1072bc9e9e67SJing Zhang 	if (IS_ERR(dent))
1073bc9e9e67SJing Zhang 		return 0;
1074bc9e9e67SJing Zhang 
1075b74ed7a6SOliver Upton 	kvm->debugfs_dentry = dent;
1076bc9e9e67SJing Zhang 	kvm->debugfs_stat_data = kcalloc(kvm_debugfs_num_entries,
1077bc9e9e67SJing Zhang 					 sizeof(*kvm->debugfs_stat_data),
1078bc9e9e67SJing Zhang 					 GFP_KERNEL_ACCOUNT);
1079bc9e9e67SJing Zhang 	if (!kvm->debugfs_stat_data)
1080004d62ebSPavel Skripkin 		goto out_err;
1081bc9e9e67SJing Zhang 
108209cbcef6SMilan Pandurov 	for (i = 0; i < kvm_vm_stats_header.num_desc; ++i) {
108309cbcef6SMilan Pandurov 		pdesc = &kvm_vm_stats_desc[i];
1084536a6f88SJanosch Frank 		stat_data = kzalloc(sizeof(*stat_data), GFP_KERNEL_ACCOUNT);
10853165af73SPeter Xu 		if (!stat_data)
10863165af73SPeter Xu 			goto out_err;
1087b74ed7a6SOliver Upton 
1088b74ed7a6SOliver Upton 		stat_data->kvm = kvm;
10893165af73SPeter Xu 		stat_data->desc = pdesc;
1090536a6f88SJanosch Frank 		stat_data->kind = KVM_STAT_VM;
1091b74ed7a6SOliver Upton 		kvm->debugfs_stat_data[i] = stat_data;
1092b74ed7a6SOliver Upton 		debugfs_create_file(pdesc->name, kvm_stats_debugfs_mode(pdesc),
1093b74ed7a6SOliver Upton 				    kvm->debugfs_dentry, stat_data,
1094536a6f88SJanosch Frank 				    &stat_fops_per_vm);
1095536a6f88SJanosch Frank 	}
10961aa9b957SJunaid Shahid 
10971aa9b957SJunaid Shahid 	for (i = 0; i < kvm_vcpu_stats_header.num_desc; ++i) {
10981aa9b957SJunaid Shahid 		pdesc = &kvm_vcpu_stats_desc[i];
10991aa9b957SJunaid Shahid 		stat_data = kzalloc(sizeof(*stat_data), GFP_KERNEL_ACCOUNT);
11001aa9b957SJunaid Shahid 		if (!stat_data)
11011aa9b957SJunaid Shahid 			goto out_err;
11021aa9b957SJunaid Shahid 
11031aa9b957SJunaid Shahid 		stat_data->kvm = kvm;
11041aa9b957SJunaid Shahid 		stat_data->desc = pdesc;
11051aa9b957SJunaid Shahid 		stat_data->kind = KVM_STAT_VCPU;
11061aa9b957SJunaid Shahid 		kvm->debugfs_stat_data[i + kvm_vm_stats_header.num_desc] = stat_data;
11071aa9b957SJunaid Shahid 		debugfs_create_file(pdesc->name, kvm_stats_debugfs_mode(pdesc),
11081aa9b957SJunaid Shahid 				    kvm->debugfs_dentry, stat_data,
11091aa9b957SJunaid Shahid 				    &stat_fops_per_vm);
11101aa9b957SJunaid Shahid 	}
11111aa9b957SJunaid Shahid 
11121aa9b957SJunaid Shahid 	ret = kvm_arch_create_vm_debugfs(kvm);
11133165af73SPeter Xu 	if (ret)
11143165af73SPeter Xu 		goto out_err;
11153165af73SPeter Xu 
11163165af73SPeter Xu 	return 0;
11173165af73SPeter Xu out_err:
11183165af73SPeter Xu 	kvm_destroy_vm_debugfs(kvm);
11193165af73SPeter Xu 	return ret;
11203165af73SPeter Xu }
11213165af73SPeter Xu 
11223165af73SPeter Xu /*
11233165af73SPeter Xu  * Called after the VM is otherwise initialized, but just before adding it to
1124b74ed7a6SOliver Upton  * the vm_list.
11250fce5623SAvi Kivity  */
1126d89f5effSJan Kiszka int __weak kvm_arch_post_init_vm(struct kvm *kvm)
1127a54d8066SMaciej S. Szmigiero {
11289121923cSJim Mattson 	return 0;
1129a54d8066SMaciej S. Szmigiero }
11300fce5623SAvi Kivity 
1131d89f5effSJan Kiszka /*
1132d89f5effSJan Kiszka  * Called just after removing the VM from the vm_list, but before doing any
1133d89f5effSJan Kiszka  * other destruction.
1134405294f2SSean Christopherson  */
1135405294f2SSean Christopherson void __weak kvm_arch_pre_destroy_vm(struct kvm *kvm)
1136405294f2SSean Christopherson {
1137531810caSBen Gardon }
1138f1f10076SVegard Nossum 
1139e9ad4ec8SPaolo Bonzini /*
1140e9ad4ec8SPaolo Bonzini  * Called after per-vm debugfs created.  When called kvm->debugfs_dentry should
1141e9ad4ec8SPaolo Bonzini  * be setup already, so we can create arch-specific debugfs entries under it.
1142e9ad4ec8SPaolo Bonzini  * Cleanup should be automatically done in kvm_destroy_vm_debugfs() recursively, so
1143e9ad4ec8SPaolo Bonzini  * a per-arch destroy interface is not needed.
1144b10a038eSBen Gardon  */
114752ac8b35SPaolo Bonzini int __weak kvm_arch_create_vm_debugfs(struct kvm *kvm)
114652ac8b35SPaolo Bonzini {
1147c5b07754SMarc Zyngier 	return 0;
114852ac8b35SPaolo Bonzini }
1149982ed0deSDavid Woodhouse 
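/*
 * Allocate and initialize a new VM instance; @fdname is the name of the VM
 * file descriptor and is used when naming the per-VM debugfs directory.
 */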
1150982ed0deSDavid Woodhouse static struct kvm *kvm_create_vm(unsigned long type, const char *fdname)
1151e9ad4ec8SPaolo Bonzini {
1152e9ad4ec8SPaolo Bonzini 	struct kvm *kvm = kvm_arch_alloc_vm();
1153f502cc56SSean Christopherson 	struct kvm_memslots *slots;
1154e9ad4ec8SPaolo Bonzini 	int r = -ENOMEM;
11559121923cSJim Mattson 	int i, j;
11569121923cSJim Mattson 
11575c697c36SSean Christopherson 	if (!kvm)
11585c697c36SSean Christopherson 		return ERR_PTR(-ENOMEM);
11595c697c36SSean Christopherson 
11605c697c36SSean Christopherson 	/* KVM is pinned via open("/dev/kvm"), the fd passed to this ioctl(). */
11615c697c36SSean Christopherson 	__module_get(kvm_chardev_ops.owner);
11625c697c36SSean Christopherson 
1163f2759c08SOliver Upton 	KVM_MMU_LOCK_INIT(kvm);
1164f2759c08SOliver Upton 	mmgrab(current->mm);
1165f2759c08SOliver Upton 	kvm->mm = current->mm;
11668a44119aSPaolo Bonzini 	kvm_eventfd_init(kvm);
11678a44119aSPaolo Bonzini 	mutex_init(&kvm->lock);
11688a44119aSPaolo Bonzini 	mutex_init(&kvm->irq_lock);
11698a44119aSPaolo Bonzini 	mutex_init(&kvm->slots_lock);
11708a44119aSPaolo Bonzini 	mutex_init(&kvm->slots_arch_lock);
1171e2d3fcafSPaolo Bonzini 	spin_lock_init(&kvm->mn_invalidate_lock);
11729121923cSJim Mattson 	rcuwait_init(&kvm->mn_memslots_update_rcuwait);
1173a54d8066SMaciej S. Szmigiero 	xa_init(&kvm->vcpu_array);
1174a54d8066SMaciej S. Szmigiero 
11759121923cSJim Mattson 	INIT_LIST_HEAD(&kvm->gpc_list);
1176a54d8066SMaciej S. Szmigiero 	spin_lock_init(&kvm->gpc_lock);
1177a54d8066SMaciej S. Szmigiero 
1178a54d8066SMaciej S. Szmigiero 	INIT_LIST_HEAD(&kvm->devices);
1179a54d8066SMaciej S. Szmigiero 	kvm->max_vcpus = KVM_MAX_VCPUS;
1180a54d8066SMaciej S. Szmigiero 
1181a54d8066SMaciej S. Szmigiero 	BUILD_BUG_ON(KVM_MEM_SLOTS_NUM > SHRT_MAX);
11829121923cSJim Mattson 
11839121923cSJim Mattson 	/*
1184a54d8066SMaciej S. Szmigiero 	 * Force subsequent debugfs file creations to fail if the VM directory
1185a54d8066SMaciej S. Szmigiero 	 * is not created (by kvm_create_vm_debugfs()).
1186a54d8066SMaciej S. Szmigiero 	 */
11879121923cSJim Mattson 	kvm->debugfs_dentry = ERR_PTR(-ENOENT);
11889121923cSJim Mattson 
11899121923cSJim Mattson 	snprintf(kvm->stats_id, sizeof(kvm->stats_id), "kvm-%d",
11909121923cSJim Mattson 		 task_pid_nr(current));
11919121923cSJim Mattson 
11929121923cSJim Mattson 	if (init_srcu_struct(&kvm->srcu))
1193a97b0e77SJim Mattson 		goto out_err_no_srcu;
11949121923cSJim Mattson 	if (init_srcu_struct(&kvm->irq_srcu))
11959121923cSJim Mattson 		goto out_err_no_irq_srcu;
1196e08b9637SCarsten Otte 
1197d89f5effSJan Kiszka 	refcount_set(&kvm->users_count, 1);
1198a97b0e77SJim Mattson 	for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) {
119910474ae8SAlexander Graf 		for (j = 0; j < 2; j++) {
120010474ae8SAlexander Graf 			slots = &kvm->__memslots[i][j];
120110474ae8SAlexander Graf 
1202719d93cdSChristian Borntraeger 			atomic_long_set(&slots->last_used_slot, (unsigned long)NULL);
120310474ae8SAlexander Graf 			slots->hva_tree = RB_ROOT_CACHED;
1204c77dcacbSPaolo Bonzini 			slots->gfn_tree = RB_ROOT;
1205136bdfeeSGleb Natapov 			hash_init(slots->id_hash);
120675858a84SAvi Kivity 			slots->node_idx = j;
12070fce5623SAvi Kivity 
120874b5c5bfSMike Waychison 			/* Generations must be different for each address space. */
120974b5c5bfSMike Waychison 			slots->generation = i;
12101aa9b957SJunaid Shahid 		}
12111aa9b957SJunaid Shahid 
1212c2b82397SSean Christopherson 		rcu_assign_pointer(kvm->memslots[i], &kvm->__memslots[i][0]);
1213c2b82397SSean Christopherson 	}
1214c2b82397SSean Christopherson 
1215c2b82397SSean Christopherson 	for (i = 0; i < KVM_NR_BUSES; i++) {
12164ba4f419SSean Christopherson 		rcu_assign_pointer(kvm->buses[i],
12174ba4f419SSean Christopherson 			kzalloc(sizeof(struct kvm_io_bus), GFP_KERNEL_ACCOUNT));
12184ba4f419SSean Christopherson 		if (!kvm->buses[i])
12194ba4f419SSean Christopherson 			goto out_err_no_arch_destroy_vm;
12201aa9b957SJunaid Shahid 	}
12211aa9b957SJunaid Shahid 
12224ba4f419SSean Christopherson 	r = kvm_arch_init_vm(kvm, type);
122374b5c5bfSMike Waychison 	if (r)
12240d9ce162SJunaid Shahid 		goto out_err_no_arch_destroy_vm;
12250fce5623SAvi Kivity 
12260d9ce162SJunaid Shahid 	r = hardware_enable_all();
1227d89f5effSJan Kiszka 	if (r)
12282ecd9d29SPeter Zijlstra 		goto out_err_no_disable;
12292fdef3a2SSergey Senozhatsky 
12302ecd9d29SPeter Zijlstra #ifdef CONFIG_HAVE_KVM_IRQFD
12310fce5623SAvi Kivity 	INIT_HLIST_HEAD(&kvm->irq_ack_notifier_list);
123210474ae8SAlexander Graf #endif
123310474ae8SAlexander Graf 
12344ba4f419SSean Christopherson 	r = kvm_init_mmu_notifier(kvm);
12354ba4f419SSean Christopherson 	if (r)
1236c2b82397SSean Christopherson 		goto out_err_no_mmu_notifier;
1237c2b82397SSean Christopherson 
12381aa9b957SJunaid Shahid 	r = kvm_coalesced_mmio_init(kvm);
12391aa9b957SJunaid Shahid 	if (r < 0)
12401aa9b957SJunaid Shahid 		goto out_no_coalesced_mmio;
12411aa9b957SJunaid Shahid 
12421aa9b957SJunaid Shahid 	r = kvm_create_vm_debugfs(kvm, fdname);
124310474ae8SAlexander Graf 	if (r)
1244719d93cdSChristian Borntraeger 		goto out_err_no_debugfs;
1245a97b0e77SJim Mattson 
1246a97b0e77SJim Mattson 	r = kvm_arch_post_init_vm(kvm);
1247e2d3fcafSPaolo Bonzini 	if (r)
1248e93f8a0fSMarcelo Tosatti 		goto out_err;
12493898da94SPaolo Bonzini 
12508a44119aSPaolo Bonzini 	mutex_lock(&kvm_lock);
12518a44119aSPaolo Bonzini 	list_add(&kvm->vm_list, &vm_list);
12528a44119aSPaolo Bonzini 	mutex_unlock(&kvm_lock);
12538a44119aSPaolo Bonzini 
1254d89f5effSJan Kiszka 	preempt_notifier_inc();
1255e9ad4ec8SPaolo Bonzini 	kvm_init_pm_notifier(kvm);
1256405294f2SSean Christopherson 
125710474ae8SAlexander Graf 	return kvm;
12580fce5623SAvi Kivity 
12590fce5623SAvi Kivity out_err:
126007f0a7bdSScott Wood 	kvm_destroy_vm_debugfs(kvm);
126107f0a7bdSScott Wood out_err_no_debugfs:
1262e6e3b5a6SGeliang Tang 	kvm_coalesced_mmio_free(kvm);
126307f0a7bdSScott Wood out_no_coalesced_mmio:
1264a28ebea2SChristoffer Dall #if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER)
1265a28ebea2SChristoffer Dall 	if (kvm->mmu_notifier.ops)
1266a28ebea2SChristoffer Dall 		mmu_notifier_unregister(&kvm->mmu_notifier, current->mm);
1267a28ebea2SChristoffer Dall #endif
1268a28ebea2SChristoffer Dall out_err_no_mmu_notifier:
1269e6e3b5a6SGeliang Tang 	hardware_disable_all();
1270e6e3b5a6SGeliang Tang out_err_no_disable:
127107f0a7bdSScott Wood 	kvm_arch_destroy_vm(kvm);
127207f0a7bdSScott Wood out_err_no_arch_destroy_vm:
127307f0a7bdSScott Wood 	WARN_ON_ONCE(!refcount_dec_and_test(&kvm->users_count));
127407f0a7bdSScott Wood 	for (i = 0; i < KVM_NR_BUSES; i++)
12750fce5623SAvi Kivity 		kfree(kvm_get_bus(kvm, i));
12760fce5623SAvi Kivity 	cleanup_srcu_struct(&kvm->irq_srcu);
1277e93f8a0fSMarcelo Tosatti out_err_no_irq_srcu:
12780fce5623SAvi Kivity 	cleanup_srcu_struct(&kvm->srcu);
12790fce5623SAvi Kivity out_err_no_srcu:
12802fdef3a2SSergey Senozhatsky 	kvm_arch_free_vm(kvm);
1281286de8f6SClaudio Imbrenda 	mmdrop(current->mm);
1282536a6f88SJanosch Frank 	module_put(kvm_chardev_ops.owner);
1283ad8ba2cdSSheng Yang 	return ERR_PTR(r);
12840d9ce162SJunaid Shahid }
12850fce5623SAvi Kivity 
12860d9ce162SJunaid Shahid static void kvm_destroy_devices(struct kvm *kvm)
12871aa9b957SJunaid Shahid {
12881aa9b957SJunaid Shahid 	struct kvm_device *dev, *tmp;
1289399ec807SAvi Kivity 
1290df630b8cSPeter Xu 	/*
12913898da94SPaolo Bonzini 	 * We do not need to take the kvm->lock here, because nobody else
12924a12f951SChristian Borntraeger 	 * has a reference to the struct kvm at this point and therefore
12934a12f951SChristian Borntraeger 	 * cannot access the devices list anyhow.
12944a12f951SChristian Borntraeger 	 */
1295df630b8cSPeter Xu 	list_for_each_entry_safe(dev, tmp, &kvm->devices, vm_node) {
1296df630b8cSPeter Xu 		list_del(&dev->vm_node);
1297980da6ceSAvi Kivity 		dev->ops->destroy(dev);
1298e930bffeSAndrea Arcangeli 	}
1299e930bffeSAndrea Arcangeli }
130052ac8b35SPaolo Bonzini 
130152ac8b35SPaolo Bonzini static void kvm_destroy_vm(struct kvm *kvm)
130252ac8b35SPaolo Bonzini {
130352ac8b35SPaolo Bonzini 	int i;
1304b0d23708SJun Miao 	struct mm_struct *mm = kvm->mm;
130552ac8b35SPaolo Bonzini 
130652ac8b35SPaolo Bonzini 	kvm_destroy_pm_notifier(kvm);
130752ac8b35SPaolo Bonzini 	kvm_uevent_notify_change(KVM_EVENT_DESTROY_VM, kvm);
130852ac8b35SPaolo Bonzini 	kvm_destroy_vm_debugfs(kvm);
130952ac8b35SPaolo Bonzini 	kvm_arch_sync_events(kvm);
1310f00be0caSGleb Natapov 	mutex_lock(&kvm_lock);
1311683412ccSMingwei Zhang 	list_del(&kvm->vm_list);
1312e930bffeSAndrea Arcangeli 	mutex_unlock(&kvm_lock);
13130fce5623SAvi Kivity 	kvm_arch_pre_destroy_vm(kvm);
131407f0a7bdSScott Wood 
1315a54d8066SMaciej S. Szmigiero 	kvm_free_irq_routing(kvm);
1316a54d8066SMaciej S. Szmigiero 	for (i = 0; i < KVM_NR_BUSES; i++) {
1317a54d8066SMaciej S. Szmigiero 		struct kvm_io_bus *bus = kvm_get_bus(kvm, i);
1318a54d8066SMaciej S. Szmigiero 
1319820b3fcdSPaolo Bonzini 		if (bus)
1320d89f5effSJan Kiszka 			kvm_io_bus_destroy(bus);
1321d89f5effSJan Kiszka 		kvm->buses[i] = NULL;
13222ecd9d29SPeter Zijlstra 	}
132310474ae8SAlexander Graf 	kvm_coalesced_mmio_free(kvm);
13240fce5623SAvi Kivity #if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER)
13255f6de5cbSDavid Matlack 	mmu_notifier_unregister(&kvm->mmu_notifier, kvm->mm);
13260fce5623SAvi Kivity 	/*
13270fce5623SAvi Kivity 	 * At this point, pending calls to invalidate_range_start()
1328d39f13b0SIzik Eidus 	 * have completed but no more MMU notifiers will run, so
1329d39f13b0SIzik Eidus 	 * mn_active_invalidate_count may remain unbalanced.
1330e3736c3eSElena Reshetova 	 * No threads can be waiting in kvm_swap_active_memslots() as the
1331d39f13b0SIzik Eidus 	 * last reference on KVM has been dropped, but freeing
1332d39f13b0SIzik Eidus 	 * memslots would deadlock without this manual intervention.
1333d39f13b0SIzik Eidus 	 */
1334605c7130SPeter Xu 	WARN_ON(rcuwait_active(&kvm->mn_memslots_update_rcuwait));
1335605c7130SPeter Xu 	kvm->mn_active_invalidate_count = 0;
1336605c7130SPeter Xu #else
1337605c7130SPeter Xu 	kvm_flush_shadow_all(kvm);
1338605c7130SPeter Xu #endif
1339605c7130SPeter Xu 	kvm_arch_destroy_vm(kvm);
1340605c7130SPeter Xu 	kvm_destroy_devices(kvm);
1341605c7130SPeter Xu 	for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) {
1342605c7130SPeter Xu 		kvm_free_memslots(kvm, &kvm->__memslots[i][0]);
1343605c7130SPeter Xu 		kvm_free_memslots(kvm, &kvm->__memslots[i][1]);
1344d39f13b0SIzik Eidus 	}
1345d39f13b0SIzik Eidus 	cleanup_srcu_struct(&kvm->irq_srcu);
1346e3736c3eSElena Reshetova 	cleanup_srcu_struct(&kvm->srcu);
1347d39f13b0SIzik Eidus 	kvm_arch_free_vm(kvm);
1348d39f13b0SIzik Eidus 	preempt_notifier_dec();
1349d39f13b0SIzik Eidus 	hardware_disable_all();
1350d39f13b0SIzik Eidus 	mmdrop(mm);
1351149487bdSSean Christopherson 	module_put(kvm_chardev_ops.owner);
1352149487bdSSean Christopherson }
1353149487bdSSean Christopherson 
1354149487bdSSean Christopherson void kvm_get_kvm(struct kvm *kvm)
1355149487bdSSean Christopherson {
1356149487bdSSean Christopherson 	refcount_inc(&kvm->users_count);
1357149487bdSSean Christopherson }
1358149487bdSSean Christopherson EXPORT_SYMBOL_GPL(kvm_get_kvm);
1359149487bdSSean Christopherson 
1360149487bdSSean Christopherson /*
1361149487bdSSean Christopherson  * Make sure the vm is not being destroyed; this is a safe version of
1362149487bdSSean Christopherson  * kvm_get_kvm().  Return true if kvm referenced successfully, false otherwise.
1363d39f13b0SIzik Eidus  */
13640fce5623SAvi Kivity bool kvm_get_kvm_safe(struct kvm *kvm)
13650fce5623SAvi Kivity {
13660fce5623SAvi Kivity 	return refcount_inc_not_zero(&kvm->users_count);
13670fce5623SAvi Kivity }
1368721eecbfSGregory Haskins EXPORT_SYMBOL_GPL(kvm_get_kvm_safe);
1369721eecbfSGregory Haskins 
1370d39f13b0SIzik Eidus void kvm_put_kvm(struct kvm *kvm)
13710fce5623SAvi Kivity {
13720fce5623SAvi Kivity 	if (refcount_dec_and_test(&kvm->users_count))
13730fce5623SAvi Kivity 		kvm_destroy_vm(kvm);
1374515a0127STakuya Yoshikawa }
1375515a0127STakuya Yoshikawa EXPORT_SYMBOL_GPL(kvm_put_kvm);
13760dff0846SSean Christopherson 
1377515a0127STakuya Yoshikawa /*
13783c9bd400SJay Zhou  * Used to put a reference that was taken on behalf of an object associated
1379a36a57b1STakuya Yoshikawa  * with a user-visible file descriptor, e.g. a vcpu or device, if installation
138037b2a651SPaolo Bonzini  * of the new file descriptor fails and the reference cannot be transferred to
1381a36a57b1STakuya Yoshikawa  * its final owner.  In such cases, the caller is still actively using @kvm and
138237b2a651SPaolo Bonzini  * will fail miserably if the refcount unexpectedly hits zero.
1383a36a57b1STakuya Yoshikawa  */
1384a36a57b1STakuya Yoshikawa void kvm_put_kvm_no_destroy(struct kvm *kvm)
1385a36a57b1STakuya Yoshikawa {
1386a36a57b1STakuya Yoshikawa 	WARN_ON(refcount_dec_and_test(&kvm->users_count));
1387a36a57b1STakuya Yoshikawa }
1388a36a57b1STakuya Yoshikawa EXPORT_SYMBOL_GPL(kvm_put_kvm_no_destroy);
1389a54d8066SMaciej S. Szmigiero 
1390bf3e05bcSXiao Guangrong static int kvm_vm_release(struct inode *inode, struct file *filp)
1391a54d8066SMaciej S. Szmigiero {
1392a54d8066SMaciej S. Szmigiero 	struct kvm *kvm = filp->private_data;
1393bf3e05bcSXiao Guangrong 
1394a54d8066SMaciej S. Szmigiero 	kvm_irqfd_release(kvm);
13958593176cSPaolo Bonzini 
1396efbeec70SPaolo Bonzini 	kvm_put_kvm(kvm);
1397efbeec70SPaolo Bonzini 	return 0;
1398a54d8066SMaciej S. Szmigiero }
1399a54d8066SMaciej S. Szmigiero 
1400a54d8066SMaciej S. Szmigiero /*
1401efbeec70SPaolo Bonzini  * Allocation size is twice as large as the actual dirty bitmap size.
1402a54d8066SMaciej S. Szmigiero  * See kvm_vm_ioctl_get_dirty_log() for why this is needed.
1403a54d8066SMaciej S. Szmigiero  */
14040577d1abSSean Christopherson static int kvm_alloc_dirty_bitmap(struct kvm_memory_slot *memslot)
1405a54d8066SMaciej S. Szmigiero {
1406a54d8066SMaciej S. Szmigiero 	unsigned long dirty_bytes = kvm_dirty_bitmap_bytes(memslot);
1407a54d8066SMaciej S. Szmigiero 
1408a54d8066SMaciej S. Szmigiero 	memslot->dirty_bitmap = __vcalloc(2, dirty_bytes, GFP_KERNEL_ACCOUNT);
1409a54d8066SMaciej S. Szmigiero 	if (!memslot->dirty_bitmap)
1410a54d8066SMaciej S. Szmigiero 		return -ENOMEM;
1411a54d8066SMaciej S. Szmigiero 
1412a54d8066SMaciej S. Szmigiero 	return 0;
1413a54d8066SMaciej S. Szmigiero }
1414a54d8066SMaciej S. Szmigiero 
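/*
 * Each address space has two memslot sets that alternate roles; node_idx is
 * 0 or 1, so XOR-ing the active set's index yields the inactive set.
 */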
14150577d1abSSean Christopherson static struct kvm_memslots *kvm_get_inactive_memslots(struct kvm *kvm, int as_id)
14160577d1abSSean Christopherson {
1417a54d8066SMaciej S. Szmigiero 	struct kvm_memslots *active = __kvm_memslots(kvm, as_id);
1418a54d8066SMaciej S. Szmigiero 	int node_idx_inactive = active->node_idx ^ 1;
14190577d1abSSean Christopherson 
1420a54d8066SMaciej S. Szmigiero 	return &kvm->__memslots[as_id][node_idx_inactive];
1421a54d8066SMaciej S. Szmigiero }
1422a54d8066SMaciej S. Szmigiero 
14230577d1abSSean Christopherson /*
1424a54d8066SMaciej S. Szmigiero  * Helper to get the address space ID when one of memslot pointers may be NULL.
1425a54d8066SMaciej S. Szmigiero  * This also serves as a sanity check that at least one of the pointers is non-NULL,
1426a54d8066SMaciej S. Szmigiero  * and that their address space IDs don't diverge.
14270577d1abSSean Christopherson  */
1428a54d8066SMaciej S. Szmigiero static int kvm_memslots_get_as_id(struct kvm_memory_slot *a,
1429a54d8066SMaciej S. Szmigiero 				  struct kvm_memory_slot *b)
1430a54d8066SMaciej S. Szmigiero {
1431a54d8066SMaciej S. Szmigiero 	if (WARN_ON_ONCE(!a && !b))
1432a54d8066SMaciej S. Szmigiero 		return 0;
1433a54d8066SMaciej S. Szmigiero 
14340577d1abSSean Christopherson 	if (!a)
1435a54d8066SMaciej S. Szmigiero 		return b->as_id;
1436a54d8066SMaciej S. Szmigiero 	if (!b)
1437a54d8066SMaciej S. Szmigiero 		return a->as_id;
1438a54d8066SMaciej S. Szmigiero 
1439a54d8066SMaciej S. Szmigiero 	WARN_ON_ONCE(a->as_id != b->as_id);
1440a54d8066SMaciej S. Szmigiero 	return a->as_id;
1441a54d8066SMaciej S. Szmigiero }
1442a54d8066SMaciej S. Szmigiero 
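/*
 * Insert @slot into the gfn-ordered rb-tree, keyed on base_gfn.  Overlap is
 * rejected before slots are committed, so hitting an equal base_gfn here is
 * a bug.
 */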
1443a54d8066SMaciej S. Szmigiero static void kvm_insert_gfn_node(struct kvm_memslots *slots,
1444a54d8066SMaciej S. Szmigiero 				struct kvm_memory_slot *slot)
1445a54d8066SMaciej S. Szmigiero {
1446a54d8066SMaciej S. Szmigiero 	struct rb_root *gfn_tree = &slots->gfn_tree;
1447a54d8066SMaciej S. Szmigiero 	struct rb_node **node, *parent;
1448a54d8066SMaciej S. Szmigiero 	int idx = slots->node_idx;
144926b8345aSMaciej S. Szmigiero 
145026b8345aSMaciej S. Szmigiero 	parent = NULL;
145126b8345aSMaciej S. Szmigiero 	for (node = &gfn_tree->rb_node; *node; ) {
1452a54d8066SMaciej S. Szmigiero 		struct kvm_memory_slot *tmp;
1453a54d8066SMaciej S. Szmigiero 
1454a54d8066SMaciej S. Szmigiero 		tmp = container_of(*node, struct kvm_memory_slot, gfn_node[idx]);
1455a54d8066SMaciej S. Szmigiero 		parent = *node;
1456a54d8066SMaciej S. Szmigiero 		if (slot->base_gfn < tmp->base_gfn)
1457a54d8066SMaciej S. Szmigiero 			node = &(*node)->rb_left;
1458a54d8066SMaciej S. Szmigiero 		else if (slot->base_gfn > tmp->base_gfn)
14590577d1abSSean Christopherson 			node = &(*node)->rb_right;
14600577d1abSSean Christopherson 		else
1461a54d8066SMaciej S. Szmigiero 			BUG();
1462a54d8066SMaciej S. Szmigiero 	}
1463a54d8066SMaciej S. Szmigiero 
1464a54d8066SMaciej S. Szmigiero 	rb_link_node(&slot->gfn_node[idx], parent, node);
1465a54d8066SMaciej S. Szmigiero 	rb_insert_color(&slot->gfn_node[idx], gfn_tree);
1466a54d8066SMaciej S. Szmigiero }
1467a54d8066SMaciej S. Szmigiero 
14680577d1abSSean Christopherson static void kvm_erase_gfn_node(struct kvm_memslots *slots,
1469a54d8066SMaciej S. Szmigiero 			       struct kvm_memory_slot *slot)
1470a54d8066SMaciej S. Szmigiero {
1471a54d8066SMaciej S. Szmigiero 	rb_erase(&slot->gfn_node[slots->node_idx], &slots->gfn_tree);
1472a54d8066SMaciej S. Szmigiero }
1473a54d8066SMaciej S. Szmigiero 
1474a54d8066SMaciej S. Szmigiero static void kvm_replace_gfn_node(struct kvm_memslots *slots,
1475a54d8066SMaciej S. Szmigiero 				 struct kvm_memory_slot *old,
1476a54d8066SMaciej S. Szmigiero 				 struct kvm_memory_slot *new)
147726b8345aSMaciej S. Szmigiero {
1478a54d8066SMaciej S. Szmigiero 	int idx = slots->node_idx;
1479a54d8066SMaciej S. Szmigiero 
148026b8345aSMaciej S. Szmigiero 	WARN_ON_ONCE(old->base_gfn != new->base_gfn);
1481a54d8066SMaciej S. Szmigiero 
1482a54d8066SMaciej S. Szmigiero 	rb_replace_node(&old->gfn_node[idx], &new->gfn_node[idx],
1483a54d8066SMaciej S. Szmigiero 			&slots->gfn_tree);
1484a54d8066SMaciej S. Szmigiero }
1485a54d8066SMaciej S. Szmigiero 
148626b8345aSMaciej S. Szmigiero /*
1487a54d8066SMaciej S. Szmigiero  * Replace @old with @new in the inactive memslots.
1488a54d8066SMaciej S. Szmigiero  *
148926b8345aSMaciej S. Szmigiero  * With NULL @old this simply adds @new.
1490a54d8066SMaciej S. Szmigiero  * With NULL @new this simply removes @old.
1491a54d8066SMaciej S. Szmigiero  *
1492a54d8066SMaciej S. Szmigiero  * If @new is non-NULL its hva_node[slots_idx] range has to be set
1493a54d8066SMaciej S. Szmigiero  * appropriately.
1494a54d8066SMaciej S. Szmigiero  */
1495a54d8066SMaciej S. Szmigiero static void kvm_replace_memslot(struct kvm *kvm,
1496ed922739SMaciej S. Szmigiero 				struct kvm_memory_slot *old,
149726b8345aSMaciej S. Szmigiero 				struct kvm_memory_slot *new)
14980fce5623SAvi Kivity {
1499a54d8066SMaciej S. Szmigiero 	int as_id = kvm_memslots_get_as_id(old, new);
1500a54d8066SMaciej S. Szmigiero 	struct kvm_memslots *slots = kvm_get_inactive_memslots(kvm, as_id);
1501a54d8066SMaciej S. Szmigiero 	int idx = slots->node_idx;
15020fce5623SAvi Kivity 
1503a54d8066SMaciej S. Szmigiero 	if (old) {
1504a54d8066SMaciej S. Szmigiero 		hash_del(&old->id_node[idx]);
15050fce5623SAvi Kivity 		interval_tree_remove(&old->hva_node[idx], &slots->hva_tree);
150626b8345aSMaciej S. Szmigiero 
1507a54d8066SMaciej S. Szmigiero 		if ((long)old == atomic_long_read(&slots->last_used_slot))
1508a54d8066SMaciej S. Szmigiero 			atomic_long_set(&slots->last_used_slot, (long)new);
1509a54d8066SMaciej S. Szmigiero 
1510a54d8066SMaciej S. Szmigiero 		if (!new) {
1511a54d8066SMaciej S. Szmigiero 			kvm_erase_gfn_node(slots, old);
151226b8345aSMaciej S. Szmigiero 			return;
1513a54d8066SMaciej S. Szmigiero 		}
1514a54d8066SMaciej S. Szmigiero 	}
15150577d1abSSean Christopherson 
1516a54d8066SMaciej S. Szmigiero 	/*
1517a54d8066SMaciej S. Szmigiero 	 * Initialize @new's hva range.  Do this even when replacing an @old
1518a54d8066SMaciej S. Szmigiero 	 * slot; kvm_copy_memslot() deliberately does not touch node data.
15190577d1abSSean Christopherson 	 */
1520bf3e05bcSXiao Guangrong 	new->hva_node[idx].start = new->userspace_addr;
1521bf3e05bcSXiao Guangrong 	new->hva_node[idx].last = new->userspace_addr +
152209170a49SPaolo Bonzini 				  (new->npages << PAGE_SHIFT) - 1;
1523a50d64d6SXiao Guangrong 
15244d8b81abSXiao Guangrong 	/*
15254d8b81abSXiao Guangrong 	 * (Re)Add the new memslot.  There is no O(1) interval_tree_replace(),
15260f8a4de3SChristoffer Dall 	 * hva_node needs to be swapped with remove+insert even though hva can't
15274d8b81abSXiao Guangrong 	 * change when replacing an existing slot.
15284d8b81abSXiao Guangrong 	 */
15294d8b81abSXiao Guangrong 	hash_add(slots->id_hash, &new->id_node[idx], new->id);
15304d8b81abSXiao Guangrong 	interval_tree_insert(&new->hva_node[idx], &slots->hva_tree);
1531a50d64d6SXiao Guangrong 
1532a50d64d6SXiao Guangrong 	/*
1533a50d64d6SXiao Guangrong 	 * If the memslot gfn is unchanged, rb_replace_node() can be used to
1534a50d64d6SXiao Guangrong 	 * switch the node in the gfn tree instead of removing the old and
1535a50d64d6SXiao Guangrong 	 * inserting the new as two separate operations. Replacement is a
1536a54d8066SMaciej S. Szmigiero 	 * single O(1) operation versus two O(log(n)) operations for
15377ec4fb44SGleb Natapov 	 * remove+insert.
1538a54d8066SMaciej S. Szmigiero 	 */
1539a54d8066SMaciej S. Szmigiero 	if (old && old->base_gfn == new->base_gfn) {
1540a54d8066SMaciej S. Szmigiero 		kvm_replace_gfn_node(slots, old, new);
1541a54d8066SMaciej S. Szmigiero 	} else {
15427ec4fb44SGleb Natapov 		if (old)
1543361209e0SSean Christopherson 			kvm_erase_gfn_node(slots, old);
1544361209e0SSean Christopherson 		kvm_insert_gfn_node(slots, new);
1545ee3d1570SDavid Matlack 	}
154652ac8b35SPaolo Bonzini }
154752ac8b35SPaolo Bonzini 
1548071064f1SPaolo Bonzini static int check_memory_region_flags(const struct kvm_userspace_memory_region *mem)
1549071064f1SPaolo Bonzini {
155052ac8b35SPaolo Bonzini 	u32 valid_flags = KVM_MEM_LOG_DIRTY_PAGES;
155152ac8b35SPaolo Bonzini 
155252ac8b35SPaolo Bonzini #ifdef __KVM_HAVE_READONLY_MEM
155352ac8b35SPaolo Bonzini 	valid_flags |= KVM_MEM_READONLY;
155452ac8b35SPaolo Bonzini #endif
155552ac8b35SPaolo Bonzini 
155652ac8b35SPaolo Bonzini 	if (mem->flags & ~valid_flags)
155752ac8b35SPaolo Bonzini 		return -EINVAL;
155852ac8b35SPaolo Bonzini 
155952ac8b35SPaolo Bonzini 	return 0;
1560f481b069SPaolo Bonzini }
156152ac8b35SPaolo Bonzini 
1562b10a038eSBen Gardon static void kvm_swap_active_memslots(struct kvm *kvm, int as_id)
1563b10a038eSBen Gardon {
1564b10a038eSBen Gardon 	struct kvm_memslots *slots = kvm_get_inactive_memslots(kvm, as_id);
1565b10a038eSBen Gardon 
1566b10a038eSBen Gardon 	/* Grab the generation from the active memslots. */
1567b10a038eSBen Gardon 	u64 gen = __kvm_memslots(kvm, as_id)->generation;
1568b10a038eSBen Gardon 
1569b10a038eSBen Gardon 	WARN_ON(gen & KVM_MEMSLOT_GEN_UPDATE_IN_PROGRESS);
15707ec4fb44SGleb Natapov 	slots->generation = gen | KVM_MEMSLOT_GEN_UPDATE_IN_PROGRESS;
1571e59dbe09STakuya Yoshikawa 
1572ee3d1570SDavid Matlack 	/*
1573361209e0SSean Christopherson 	 * Do not store the new memslots while there are invalidations in
157400116795SMiaohe Lin 	 * progress, otherwise the locking in invalidate_range_start and
1575361209e0SSean Christopherson 	 * invalidate_range_end will be unbalanced.
1576361209e0SSean Christopherson 	 */
1577361209e0SSean Christopherson 	spin_lock(&kvm->mn_invalidate_lock);
1578361209e0SSean Christopherson 	prepare_to_rcuwait(&kvm->mn_memslots_update_rcuwait);
1579361209e0SSean Christopherson 	while (kvm->mn_active_invalidate_count) {
1580361209e0SSean Christopherson 		set_current_state(TASK_UNINTERRUPTIBLE);
15814bd518f1SPaolo Bonzini 		spin_unlock(&kvm->mn_invalidate_lock);
15824bd518f1SPaolo Bonzini 		schedule();
15834bd518f1SPaolo Bonzini 		spin_lock(&kvm->mn_invalidate_lock);
1584164bf7e5SSean Christopherson 	}
1585164bf7e5SSean Christopherson 	finish_rcuwait(&kvm->mn_memslots_update_rcuwait);
1586ee3d1570SDavid Matlack 	rcu_assign_pointer(kvm->memslots[as_id], slots);
1587164bf7e5SSean Christopherson 	spin_unlock(&kvm->mn_invalidate_lock);
1588ee3d1570SDavid Matlack 
158915248258SSean Christopherson 	/*
159015248258SSean Christopherson 	 * Acquired in kvm_set_memslot. Must be released before synchronize
159115248258SSean Christopherson 	 * SRCU below in order to avoid deadlock with another thread
15927ec4fb44SGleb Natapov 	 * acquiring the slots_arch_lock in an srcu critical section.
15937ec4fb44SGleb Natapov 	 */
159407921665SSean Christopherson 	mutex_unlock(&kvm->slots_arch_lock);
159507921665SSean Christopherson 
159607921665SSean Christopherson 	synchronize_srcu_expedited(&kvm->srcu);
159736947254SSean Christopherson 
159836947254SSean Christopherson 	/*
1599cf47f50bSSean Christopherson 	 * Increment the new memslot generation a second time, dropping the
1600cf47f50bSSean Christopherson 	 * update in-progress flag and incrementing the generation based on
1601b10a038eSBen Gardon 	 * the number of address spaces.  This provides a unique and easily
160207921665SSean Christopherson 	 * identifiable generation number while the memslots are in flux.
160307921665SSean Christopherson 	 */
160407921665SSean Christopherson 	gen = slots->generation & ~KVM_MEMSLOT_GEN_UPDATE_IN_PROGRESS;
160507921665SSean Christopherson 
160607921665SSean Christopherson 	/*
160707921665SSean Christopherson 	 * Generations must be unique even across address spaces.  We do not need
1608244893faSSean Christopherson 	 * a global counter for that, instead the generation space is evenly split
160907921665SSean Christopherson 	 * across address spaces.  For example, with two address spaces, address
161007921665SSean Christopherson 	 * space 0 will use generations 0, 2, 4, ... while address space 1 will
1611244893faSSean Christopherson 	 * use generations 1, 3, 5, ...
161207921665SSean Christopherson 	 */
161386bdf3ebSGavin Shan 	gen += KVM_ADDRESS_SPACE_NUM;
161407921665SSean Christopherson 
161507921665SSean Christopherson 	kvm_arch_memslots_updated(kvm, gen);
161607921665SSean Christopherson 
161707921665SSean Christopherson 	slots->generation = gen;
161807921665SSean Christopherson }
161907921665SSean Christopherson 
162007921665SSean Christopherson static int kvm_prepare_memory_region(struct kvm *kvm,
1621244893faSSean Christopherson 				     const struct kvm_memory_slot *old,
162207921665SSean Christopherson 				     struct kvm_memory_slot *new,
162307921665SSean Christopherson 				     enum kvm_mr_change change)
162407921665SSean Christopherson {
162507921665SSean Christopherson 	int r;
1626c87661f8SSean Christopherson 
162707921665SSean Christopherson 	/*
162807921665SSean Christopherson 	 * If dirty logging is disabled, nullify the bitmap; the old bitmap
162907921665SSean Christopherson 	 * will be freed on "commit".  If logging is enabled in both old and
163007921665SSean Christopherson 	 * new, reuse the existing bitmap.  If logging is enabled only in the
163107921665SSean Christopherson 	 * new and KVM isn't using a ring buffer, allocate and initialize a
163207921665SSean Christopherson 	 * new bitmap.
163307921665SSean Christopherson 	 */
163407921665SSean Christopherson 	if (change != KVM_MR_DELETE) {
163507921665SSean Christopherson 		if (!(new->flags & KVM_MEM_LOG_DIRTY_PAGES))
163607921665SSean Christopherson 			new->dirty_bitmap = NULL;
16376c7b2202SPaolo Bonzini 		else if (old && old->dirty_bitmap)
16386c7b2202SPaolo Bonzini 			new->dirty_bitmap = old->dirty_bitmap;
163907921665SSean Christopherson 		else if (kvm_use_dirty_bitmap(kvm)) {
164007921665SSean Christopherson 			r = kvm_alloc_dirty_bitmap(new);
164107921665SSean Christopherson 			if (r)
164207921665SSean Christopherson 				return r;
164307921665SSean Christopherson 
164407921665SSean Christopherson 			if (kvm_dirty_log_manual_protect_and_init_set(kvm))
164507921665SSean Christopherson 				bitmap_set(new->dirty_bitmap, 0, new->npages);
164607921665SSean Christopherson 		}
164707921665SSean Christopherson 	}
16486c7b2202SPaolo Bonzini 
16496c7b2202SPaolo Bonzini 	r = kvm_arch_prepare_memory_region(kvm, old, new, change);
16506c7b2202SPaolo Bonzini 
16516c7b2202SPaolo Bonzini 	/* Free the bitmap on failure if it was allocated above. */
16526c7b2202SPaolo Bonzini 	if (r && new && new->dirty_bitmap && (!old || !old->dirty_bitmap))
16536c7b2202SPaolo Bonzini 		kvm_destroy_dirty_bitmap(new);
165407921665SSean Christopherson 
165507921665SSean Christopherson 	return r;
1656a54d8066SMaciej S. Szmigiero }
1657a54d8066SMaciej S. Szmigiero 
1658a54d8066SMaciej S. Szmigiero static void kvm_commit_memory_region(struct kvm *kvm,
1659a54d8066SMaciej S. Szmigiero 				     struct kvm_memory_slot *old,
1660a54d8066SMaciej S. Szmigiero 				     const struct kvm_memory_slot *new,
1661a54d8066SMaciej S. Szmigiero 				     enum kvm_mr_change change)
166207921665SSean Christopherson {
1663a54d8066SMaciej S. Szmigiero 	int old_flags = old ? old->flags : 0;
1664a54d8066SMaciej S. Szmigiero 	int new_flags = new ? new->flags : 0;
1665a54d8066SMaciej S. Szmigiero 	/*
1666a54d8066SMaciej S. Szmigiero 	 * Update the total number of memslot pages before calling the arch
1667a54d8066SMaciej S. Szmigiero 	 * hook so that architectures can consume the result directly.
1668a54d8066SMaciej S. Szmigiero 	 */
1669a54d8066SMaciej S. Szmigiero 	if (change == KVM_MR_DELETE)
1670a54d8066SMaciej S. Szmigiero 		kvm->nr_memslot_pages -= old->npages;
167107921665SSean Christopherson 	else if (change == KVM_MR_CREATE)
1672a54d8066SMaciej S. Szmigiero 		kvm->nr_memslot_pages += new->npages;
1673a54d8066SMaciej S. Szmigiero 
1674a54d8066SMaciej S. Szmigiero 	if ((old_flags ^ new_flags) & KVM_MEM_LOG_DIRTY_PAGES) {
1675a54d8066SMaciej S. Szmigiero 		int change = (new_flags & KVM_MEM_LOG_DIRTY_PAGES) ? 1 : -1;
1676a54d8066SMaciej S. Szmigiero 		atomic_set(&kvm->nr_memslots_dirty_logging,
1677a54d8066SMaciej S. Szmigiero 			   atomic_read(&kvm->nr_memslots_dirty_logging) + change);
1678a54d8066SMaciej S. Szmigiero 	}
1679a54d8066SMaciej S. Szmigiero 
1680a54d8066SMaciej S. Szmigiero 	kvm_arch_commit_memory_region(kvm, old, new, change);
1681a54d8066SMaciej S. Szmigiero 
1682a54d8066SMaciej S. Szmigiero 	switch (change) {
1683a54d8066SMaciej S. Szmigiero 	case KVM_MR_CREATE:
1684a54d8066SMaciej S. Szmigiero 		/* Nothing more to do. */
1685a54d8066SMaciej S. Szmigiero 		break;
1686a54d8066SMaciej S. Szmigiero 	case KVM_MR_DELETE:
1687a54d8066SMaciej S. Szmigiero 		/* Free the old memslot and all its metadata. */
1688a54d8066SMaciej S. Szmigiero 		kvm_free_memslot(kvm, old);
1689a54d8066SMaciej S. Szmigiero 		break;
1690a54d8066SMaciej S. Szmigiero 	case KVM_MR_MOVE:
1691a54d8066SMaciej S. Szmigiero 	case KVM_MR_FLAGS_ONLY:
1692a54d8066SMaciej S. Szmigiero 		/*
1693a54d8066SMaciej S. Szmigiero 		 * Free the dirty bitmap as needed; the below check encompasses
1694a54d8066SMaciej S. Szmigiero 		 * both the flags and whether a ring buffer is being used.
1695a54d8066SMaciej S. Szmigiero 		 */
1696a54d8066SMaciej S. Szmigiero 		if (old->dirty_bitmap && !new->dirty_bitmap)
1697a54d8066SMaciej S. Szmigiero 			kvm_destroy_dirty_bitmap(old);
1698a54d8066SMaciej S. Szmigiero 
1699a54d8066SMaciej S. Szmigiero 		/*
1700a54d8066SMaciej S. Szmigiero 		 * The final quirk.  Free the detached, old slot, but only its
1701a54d8066SMaciej S. Szmigiero 		 * memory, not any metadata.  Metadata, including arch specific
1702a54d8066SMaciej S. Szmigiero 		 * data, may be reused by @new.
1703a54d8066SMaciej S. Szmigiero 		 */
1704a54d8066SMaciej S. Szmigiero 		kfree(old);
1705a54d8066SMaciej S. Szmigiero 		break;
1706a54d8066SMaciej S. Szmigiero 	default:
1707a54d8066SMaciej S. Szmigiero 		BUG();
1708a54d8066SMaciej S. Szmigiero 	}
1709a54d8066SMaciej S. Szmigiero }
1710a54d8066SMaciej S. Szmigiero 
1711a54d8066SMaciej S. Szmigiero /*
1712a54d8066SMaciej S. Szmigiero  * Activate @new, which must be installed in the inactive slots by the caller,
1713a54d8066SMaciej S. Szmigiero  * by swapping the active slots and then propagating @new to @old once @old is
1714a54d8066SMaciej S. Szmigiero  * unreachable and can be safely modified.
1715a54d8066SMaciej S. Szmigiero  *
1716a54d8066SMaciej S. Szmigiero  * With NULL @old this simply adds @new to @active (while swapping the sets).
1717a54d8066SMaciej S. Szmigiero  * With NULL @new this simply removes @old from @active and frees it
1718a54d8066SMaciej S. Szmigiero  * (while also swapping the sets).
1719a54d8066SMaciej S. Szmigiero  */
1720a54d8066SMaciej S. Szmigiero static void kvm_activate_memslot(struct kvm *kvm,
1721244893faSSean Christopherson 				 struct kvm_memory_slot *old,
1722a54d8066SMaciej S. Szmigiero 				 struct kvm_memory_slot *new)
1723a54d8066SMaciej S. Szmigiero {
1724a54d8066SMaciej S. Szmigiero 	int as_id = kvm_memslots_get_as_id(old, new);
1725a54d8066SMaciej S. Szmigiero 
1726a54d8066SMaciej S. Szmigiero 	kvm_swap_active_memslots(kvm, as_id);
1727a54d8066SMaciej S. Szmigiero 
1728244893faSSean Christopherson 	/* Propagate the new memslot to the now inactive memslots. */
1729244893faSSean Christopherson 	kvm_replace_memslot(kvm, old, new);
1730244893faSSean Christopherson }
1731a54d8066SMaciej S. Szmigiero 
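/*
 * Copy the payload fields of @src into @dest.  The id/hva/gfn node linkage
 * is deliberately left untouched; the copy is (re)linked via
 * kvm_replace_memslot().
 */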
1732a54d8066SMaciej S. Szmigiero static void kvm_copy_memslot(struct kvm_memory_slot *dest,
1733a54d8066SMaciej S. Szmigiero 			     const struct kvm_memory_slot *src)
1734a54d8066SMaciej S. Szmigiero {
1735a54d8066SMaciej S. Szmigiero 	dest->base_gfn = src->base_gfn;
1736a54d8066SMaciej S. Szmigiero 	dest->npages = src->npages;
1737a54d8066SMaciej S. Szmigiero 	dest->dirty_bitmap = src->dirty_bitmap;
1738a54d8066SMaciej S. Szmigiero 	dest->arch = src->arch;
1739a54d8066SMaciej S. Szmigiero 	dest->userspace_addr = src->userspace_addr;
1740a54d8066SMaciej S. Szmigiero 	dest->flags = src->flags;
1741a54d8066SMaciej S. Szmigiero 	dest->id = src->id;
1742a54d8066SMaciej S. Szmigiero 	dest->as_id = src->as_id;
1743a54d8066SMaciej S. Szmigiero }
1744a54d8066SMaciej S. Szmigiero 
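/*
 * Replace @old with an INVALID-flagged copy in the active memslots so that
 * readers stop using it, then flush the shadow mappings for the old slot.
 */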
1745bcb63dcdSMaciej S. Szmigiero static void kvm_invalidate_memslot(struct kvm *kvm,
1746683412ccSMingwei Zhang 				   struct kvm_memory_slot *old,
1747a54d8066SMaciej S. Szmigiero 				   struct kvm_memory_slot *invalid_slot)
1748b0d23708SJun Miao {
1749a54d8066SMaciej S. Szmigiero 	/*
1750a54d8066SMaciej S. Szmigiero 	 * Mark the current slot INVALID.  As with all memslot modifications,
1751a54d8066SMaciej S. Szmigiero 	 * this must be done on an unreachable slot to avoid modifying the
1752a54d8066SMaciej S. Szmigiero 	 * current slot in the active tree.
1753a54d8066SMaciej S. Szmigiero 	 */
1754b0d23708SJun Miao 	kvm_copy_memslot(invalid_slot, old);
1755a54d8066SMaciej S. Szmigiero 	invalid_slot->flags |= KVM_MEMSLOT_INVALID;
1756a54d8066SMaciej S. Szmigiero 	kvm_replace_memslot(kvm, old, invalid_slot);
1757a54d8066SMaciej S. Szmigiero 
1758244893faSSean Christopherson 	/*
1759a54d8066SMaciej S. Szmigiero 	 * Activate the slot that is now marked INVALID, but don't propagate
1760a54d8066SMaciej S. Szmigiero 	 * the slot to the now inactive slots. The slot is either going to be
1761a54d8066SMaciej S. Szmigiero 	 * deleted or recreated as a new slot.
1762244893faSSean Christopherson 	 */
1763a54d8066SMaciej S. Szmigiero 	kvm_swap_active_memslots(kvm, old->as_id);
1764244893faSSean Christopherson 
1765244893faSSean Christopherson 	/*
1766244893faSSean Christopherson 	 * From this point no new shadow pages pointing to a deleted, or moved,
1767a54d8066SMaciej S. Szmigiero 	 * memslot will be created.  Validation of sp->gfn happens in:
1768a54d8066SMaciej S. Szmigiero 	 *	- gfn_to_hva (kvm_read_guest, gfn_to_pfn)
1769a54d8066SMaciej S. Szmigiero 	 *	- kvm_is_visible_gfn (mmu_check_root)
1770a54d8066SMaciej S. Szmigiero 	 */
1771a54d8066SMaciej S. Szmigiero 	kvm_arch_flush_shadow_memslot(kvm, old);
1772a54d8066SMaciej S. Szmigiero 	kvm_arch_guest_memory_reclaimed(kvm);
1773a54d8066SMaciej S. Szmigiero 
1774a54d8066SMaciej S. Szmigiero 	/* Was released by kvm_swap_active_memslots(), reacquire. */
1775244893faSSean Christopherson 	mutex_lock(&kvm->slots_arch_lock);
1776a54d8066SMaciej S. Szmigiero 
1777a54d8066SMaciej S. Szmigiero 	/*
1778a54d8066SMaciej S. Szmigiero 	 * Copy the arch-specific field of the newly-installed slot back to the
1779a54d8066SMaciej S. Szmigiero 	 * old slot as the arch data could have changed between releasing
1780a54d8066SMaciej S. Szmigiero 	 * slots_arch_lock in kvm_swap_active_memslots() and re-acquiring the lock
1781244893faSSean Christopherson 	 * above.  Writers are required to retrieve memslots *after* acquiring
1782a54d8066SMaciej S. Szmigiero 	 * slots_arch_lock, thus the active slot's data is guaranteed to be fresh.
1783244893faSSean Christopherson 	 */
1784a54d8066SMaciej S. Szmigiero 	old->arch = invalid_slot->arch;
1785a54d8066SMaciej S. Szmigiero }
1786a54d8066SMaciej S. Szmigiero 
1787244893faSSean Christopherson static void kvm_create_memslot(struct kvm *kvm,
1788244893faSSean Christopherson 			       struct kvm_memory_slot *new)
1789a54d8066SMaciej S. Szmigiero {
1790244893faSSean Christopherson 	/* Add the new memslot to the inactive set and activate. */
1791244893faSSean Christopherson 	kvm_replace_memslot(kvm, NULL, new);
1792a54d8066SMaciej S. Szmigiero 	kvm_activate_memslot(kvm, NULL, new);
1793a54d8066SMaciej S. Szmigiero }
1794a54d8066SMaciej S. Szmigiero 
1795a54d8066SMaciej S. Szmigiero static void kvm_delete_memslot(struct kvm *kvm,
1796244893faSSean Christopherson 			       struct kvm_memory_slot *old,
1797a54d8066SMaciej S. Szmigiero 			       struct kvm_memory_slot *invalid_slot)
1798a54d8066SMaciej S. Szmigiero {
1799a54d8066SMaciej S. Szmigiero 	/*
1800a54d8066SMaciej S. Szmigiero 	 * Remove the old memslot (in the inactive memslots) by passing NULL as
1801a54d8066SMaciej S. Szmigiero 	 * the "new" slot, and for the invalid version in the active slots.
1802a54d8066SMaciej S. Szmigiero 	 */
1803244893faSSean Christopherson 	kvm_replace_memslot(kvm, old, NULL);
1804244893faSSean Christopherson 	kvm_activate_memslot(kvm, invalid_slot, NULL);
180507921665SSean Christopherson }
180607921665SSean Christopherson 
1807cf47f50bSSean Christopherson static void kvm_move_memslot(struct kvm *kvm,
1808a54d8066SMaciej S. Szmigiero 			     struct kvm_memory_slot *old,
1809ce5f0215SSean Christopherson 			     struct kvm_memory_slot *new,
1810cf47f50bSSean Christopherson 			     struct kvm_memory_slot *invalid_slot)
1811cf47f50bSSean Christopherson {
1812244893faSSean Christopherson 	/*
1813cf47f50bSSean Christopherson 	 * Replace the old memslot in the inactive slots, and then swap slots
1814cf47f50bSSean Christopherson 	 * and replace the current INVALID with the new as well.
1815b10a038eSBen Gardon 	 */
1816b0d23708SJun Miao 	kvm_replace_memslot(kvm, old, new);
1817b10a038eSBen Gardon 	kvm_activate_memslot(kvm, invalid_slot, new);
1818b0d23708SJun Miao }
1819b0d23708SJun Miao 
1820b0d23708SJun Miao static void kvm_update_flags_memslot(struct kvm *kvm,
1821b10a038eSBen Gardon 				     struct kvm_memory_slot *old,
1822b10a038eSBen Gardon 				     struct kvm_memory_slot *new)
1823b10a038eSBen Gardon {
1824b10a038eSBen Gardon 	/*
1825b10a038eSBen Gardon 	 * Similar to the MOVE case, but the slot doesn't need to be zapped as
1826b10a038eSBen Gardon 	 * an intermediate step. Instead, the old memslot is simply replaced
1827b10a038eSBen Gardon 	 * with a new, updated copy in both memslot sets.
1828b10a038eSBen Gardon 	 */
1829b10a038eSBen Gardon 	kvm_replace_memslot(kvm, old, new);
1830b10a038eSBen Gardon 	kvm_activate_memslot(kvm, old, new);
1831cf47f50bSSean Christopherson }
1832a54d8066SMaciej S. Szmigiero 
1833a54d8066SMaciej S. Szmigiero static int kvm_set_memslot(struct kvm *kvm,
1834a54d8066SMaciej S. Szmigiero 			   struct kvm_memory_slot *old,
1835a54d8066SMaciej S. Szmigiero 			   struct kvm_memory_slot *new,
1836a54d8066SMaciej S. Szmigiero 			   enum kvm_mr_change change)
1837a54d8066SMaciej S. Szmigiero {
1838a54d8066SMaciej S. Szmigiero 	struct kvm_memory_slot *invalid_slot;
1839244893faSSean Christopherson 	int r;
1840244893faSSean Christopherson 
1841244893faSSean Christopherson 	/*
1842244893faSSean Christopherson 	 * Released in kvm_swap_active_memslots().
1843cf47f50bSSean Christopherson 	 *
1844244893faSSean Christopherson 	 * Must be held from before the current memslots are copied until after
1845244893faSSean Christopherson 	 * the new memslots are installed with rcu_assign_pointer, then
1846244893faSSean Christopherson 	 * released before the synchronize srcu in kvm_swap_active_memslots().
1847cf47f50bSSean Christopherson 	 *
1848cf47f50bSSean Christopherson 	 * When modifying memslots outside of the slots_lock, must be held
1849cf47f50bSSean Christopherson 	 * before reading the pointer to the current memslots until after all
1850244893faSSean Christopherson 	 * changes to those memslots are complete.
1851cf47f50bSSean Christopherson 	 *
1852cf47f50bSSean Christopherson 	 * These rules ensure that installing new memslots does not lose
1853a54d8066SMaciej S. Szmigiero 	 * changes made to the previous memslots.
1854a54d8066SMaciej S. Szmigiero 	 */
1855bda44d84SSean Christopherson 	mutex_lock(&kvm->slots_arch_lock);
1856a54d8066SMaciej S. Szmigiero 
1857a54d8066SMaciej S. Szmigiero 	/*
1858a54d8066SMaciej S. Szmigiero 	 * Invalidate the old slot if it's being deleted or moved.  This is
1859a54d8066SMaciej S. Szmigiero 	 * done prior to actually deleting/moving the memslot to allow vCPUs to
1860bda44d84SSean Christopherson 	 * continue running by ensuring there are no mappings or shadow pages
1861b10a038eSBen Gardon 	 * for the memslot when it is deleted/moved.  Without pre-invalidation
1862244893faSSean Christopherson 	 * (and without a lock), a window would exist between effecting the
1863244893faSSean Christopherson 	 * delete/move and committing the changes in arch code where KVM or a
1864b10a038eSBen Gardon 	 * guest could access a non-existent memslot.
1865b10a038eSBen Gardon 	 *
1866b10a038eSBen Gardon 	 * Modifications are done on a temporary, unreachable slot.  The old
1867cf47f50bSSean Christopherson 	 * slot needs to be preserved in case a later step fails and the
1868cf47f50bSSean Christopherson 	 * invalidation needs to be reverted.
1869cf47f50bSSean Christopherson 	 */
18709e9eb226SPeter Xu 	if (change == KVM_MR_DELETE || change == KVM_MR_MOVE) {
1871a54d8066SMaciej S. Szmigiero 		invalid_slot = kzalloc(sizeof(*invalid_slot), GFP_KERNEL_ACCOUNT);
1872a54d8066SMaciej S. Szmigiero 		if (!invalid_slot) {
1873a54d8066SMaciej S. Szmigiero 			mutex_unlock(&kvm->slots_arch_lock);
1874a54d8066SMaciej S. Szmigiero 			return -ENOMEM;
1875a54d8066SMaciej S. Szmigiero 		}
18769e9eb226SPeter Xu 		kvm_invalidate_memslot(kvm, old, invalid_slot);
1877a54d8066SMaciej S. Szmigiero 	}
1878244893faSSean Christopherson 
1879a54d8066SMaciej S. Szmigiero 	r = kvm_prepare_memory_region(kvm, old, new, change);
1880244893faSSean Christopherson 	if (r) {
1881a54d8066SMaciej S. Szmigiero 		/*
1882244893faSSean Christopherson 		 * For DELETE/MOVE, revert the above INVALID change.  No
1883a54d8066SMaciej S. Szmigiero 		 * modifications required since the original slot was preserved
1884244893faSSean Christopherson 		 * in the inactive slots.  Changing the active memslots also
1885a54d8066SMaciej S. Szmigiero 		 * releases slots_arch_lock.
1886a54d8066SMaciej S. Szmigiero 		 */
18875c0b4f3dSSean Christopherson 		if (change == KVM_MR_DELETE || change == KVM_MR_MOVE) {
1888244893faSSean Christopherson 			kvm_activate_memslot(kvm, invalid_slot, old);
1889244893faSSean Christopherson 			kfree(invalid_slot);
1890244893faSSean Christopherson 		} else {
1891244893faSSean Christopherson 			mutex_unlock(&kvm->slots_arch_lock);
1892a54d8066SMaciej S. Szmigiero 		}
1893a54d8066SMaciej S. Szmigiero 		return r;
1894a413a625STom Rix 	}
1895a54d8066SMaciej S. Szmigiero 
1896a54d8066SMaciej S. Szmigiero 	/*
1897a54d8066SMaciej S. Szmigiero 	 * For DELETE and MOVE, the working slot is now active as the INVALID
1898a54d8066SMaciej S. Szmigiero 	 * version of the old slot.  MOVE is particularly special as it reuses
1899a54d8066SMaciej S. Szmigiero 	 * the old slot and returns a copy of the old slot (in working_slot).
1900a54d8066SMaciej S. Szmigiero 	 * For CREATE, there is no old slot.  For DELETE and FLAGS_ONLY, the
1901a54d8066SMaciej S. Szmigiero 	 * old slot is detached but otherwise preserved.
190244401a20SMaciej S. Szmigiero 	 */
190344401a20SMaciej S. Szmigiero 	if (change == KVM_MR_CREATE)
190444401a20SMaciej S. Szmigiero 		kvm_create_memslot(kvm, new);
190544401a20SMaciej S. Szmigiero 	else if (change == KVM_MR_DELETE)
190644401a20SMaciej S. Szmigiero 		kvm_delete_memslot(kvm, old, invalid_slot);
190744401a20SMaciej S. Szmigiero 	else if (change == KVM_MR_MOVE)
190844401a20SMaciej S. Szmigiero 		kvm_move_memslot(kvm, old, new, invalid_slot);
190944401a20SMaciej S. Szmigiero 	else if (change == KVM_MR_FLAGS_ONLY)
191044401a20SMaciej S. Szmigiero 		kvm_update_flags_memslot(kvm, old, new);
191144401a20SMaciej S. Szmigiero 	else
191244401a20SMaciej S. Szmigiero 		BUG();
19135c0b4f3dSSean Christopherson 
19145c0b4f3dSSean Christopherson 	/* Free the temporary INVALID slot used for DELETE and MOVE. */
19150fce5623SAvi Kivity 	if (change == KVM_MR_DELETE || change == KVM_MR_MOVE)
19160fce5623SAvi Kivity 		kfree(invalid_slot);
19170fce5623SAvi Kivity 
19180fce5623SAvi Kivity 	/*
19190fce5623SAvi Kivity 	 * No need to refresh new->arch, changes after dropping slots_arch_lock
19200fce5623SAvi Kivity 	 * will directly hit the final, active memslot.  Architectures are
192102d5d55bSDominik Dingel 	 * responsible for knowing that new->arch may be stale.
19220fce5623SAvi Kivity 	 */
19230fce5623SAvi Kivity 	kvm_commit_memory_region(kvm, old, new, change);
192409170a49SPaolo Bonzini 
19250fce5623SAvi Kivity 	return 0;
1926244893faSSean Christopherson }
192744401a20SMaciej S. Szmigiero 
1928f64c0398STakuya Yoshikawa static bool kvm_check_memslot_overlap(struct kvm_memslots *slots, int id,
19290f9bdef3SSean Christopherson 				      gfn_t start, gfn_t end)
19300f9bdef3SSean Christopherson {
1931163da372SSean Christopherson 	struct kvm_memslot_iter iter;
1932163da372SSean Christopherson 
19330fce5623SAvi Kivity 	kvm_for_each_memslot_in_gfn_range(&iter, slots, start, end) {
1934a50d64d6SXiao Guangrong 		if (iter.slot->id != id)
1935a50d64d6SXiao Guangrong 			return true;
193671a4c30bSSean Christopherson 	}
1937a50d64d6SXiao Guangrong 
1938f481b069SPaolo Bonzini 	return false;
1939f481b069SPaolo Bonzini }
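/*
 * Illustrative note (hypothetical values, not part of the original file): the
 * helper above reports an overlap with any slot other than the one being
 * modified, e.g. for a new 512-page region starting at gfn 0x100:
 *
 *	if (kvm_check_memslot_overlap(slots, id, 0x100, 0x100 + 512))
 *		return -EEXIST;	// as __kvm_set_memory_region() does below
 */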
1940f481b069SPaolo Bonzini 
19410fce5623SAvi Kivity /*
19426b285a55SSean Christopherson  * Allocate some memory and give it an address in the guest physical address
19436b285a55SSean Christopherson  * space.
194471a4c30bSSean Christopherson  *
19450fce5623SAvi Kivity  * Discontiguous memory is allowed, mostly for framebuffers.
194671a4c30bSSean Christopherson  *
1947fa3d315aSTakuya Yoshikawa  * Must be called holding kvm->slots_lock for write.
194809d952c9SPaolo Bonzini  */
1949139bc8a6SMarc Zyngier int __kvm_set_memory_region(struct kvm *kvm,
195096d4f267SLinus Torvalds 			    const struct kvm_userspace_memory_region *mem)
195109d952c9SPaolo Bonzini {
195271a4c30bSSean Christopherson 	struct kvm_memory_slot *old, *new;
1953f481b069SPaolo Bonzini 	struct kvm_memslots *slots;
195471a4c30bSSean Christopherson 	enum kvm_mr_change change;
19550fce5623SAvi Kivity 	unsigned long npages;
195671a4c30bSSean Christopherson 	gfn_t base_gfn;
19570f9bdef3SSean Christopherson 	int as_id, id;
19580f9bdef3SSean Christopherson 	int r;
19590fce5623SAvi Kivity 
196044401a20SMaciej S. Szmigiero 	r = check_memory_region_flags(mem);
19610fce5623SAvi Kivity 	if (r)
19625c0b4f3dSSean Christopherson 		return r;
19637cd08553SSean Christopherson 
19647cd08553SSean Christopherson 	as_id = mem->slot >> 16;
19655c0b4f3dSSean Christopherson 	id = (u16)mem->slot;
196644401a20SMaciej S. Szmigiero 
1967163da372SSean Christopherson 	/* General sanity checks */
196847ea7d90SSean Christopherson 	if ((mem->memory_size & (PAGE_SIZE - 1)) ||
19697cd08553SSean Christopherson 	    (mem->memory_size != (unsigned long)mem->memory_size))
197047ea7d90SSean Christopherson 		return -EINVAL;
197147ea7d90SSean Christopherson 	if (mem->guest_phys_addr & (PAGE_SIZE - 1))
19727cd08553SSean Christopherson 		return -EINVAL;
197347ea7d90SSean Christopherson 	/* We can read the guest memory with __xxx_user() later on. */
197447ea7d90SSean Christopherson 	if ((mem->userspace_addr & (PAGE_SIZE - 1)) ||
1975244893faSSean Christopherson 	    (mem->userspace_addr != untagged_addr(mem->userspace_addr)) ||
19765c0b4f3dSSean Christopherson 	     !access_ok((void __user *)(unsigned long)mem->userspace_addr,
19775c0b4f3dSSean Christopherson 			mem->memory_size))
19780f9bdef3SSean Christopherson 		return -EINVAL;
19790f9bdef3SSean Christopherson 	if (as_id >= KVM_ADDRESS_SPACE_NUM || id >= KVM_MEM_SLOTS_NUM)
19805c0b4f3dSSean Christopherson 		return -EINVAL;
19817cd08553SSean Christopherson 	if (mem->guest_phys_addr + mem->memory_size < mem->guest_phys_addr)
1982f64c0398STakuya Yoshikawa 		return -EINVAL;
1983afa319a5SSean Christopherson 	if ((mem->memory_size >> PAGE_SHIFT) > KVM_MEM_MAX_NR_PAGES)
1984afa319a5SSean Christopherson 		return -EINVAL;
1985afa319a5SSean Christopherson 
1986afa319a5SSean Christopherson 	slots = __kvm_memslots(kvm, as_id);
1987afa319a5SSean Christopherson 
19880f9bdef3SSean Christopherson 	/*
1989afa319a5SSean Christopherson 	 * Note, the old memslot (and the pointer itself!) may be invalidated
19905c0b4f3dSSean Christopherson 	 * and/or destroyed by kvm_set_memslot().
19910f9bdef3SSean Christopherson 	 */
19920f9bdef3SSean Christopherson 	old = id_to_memslot(slots, id);
19930f9bdef3SSean Christopherson 
199471a4c30bSSean Christopherson 	if (!mem->memory_size) {
19950fce5623SAvi Kivity 		if (!old || !old->npages)
19960f9bdef3SSean Christopherson 			return -EINVAL;
1997f64c0398STakuya Yoshikawa 
19980f9bdef3SSean Christopherson 		if (WARN_ON_ONCE(kvm->nr_memslot_pages < old->npages))
1999f64c0398STakuya Yoshikawa 			return -EIO;
200071a4c30bSSean Christopherson 
200171a4c30bSSean Christopherson 		return kvm_set_memslot(kvm, old, NULL, KVM_MR_DELETE);
2002f64c0398STakuya Yoshikawa 	}
200309170a49SPaolo Bonzini 
200444401a20SMaciej S. Szmigiero 	base_gfn = (mem->guest_phys_addr >> PAGE_SHIFT);
20050f9bdef3SSean Christopherson 	npages = (mem->memory_size >> PAGE_SHIFT);
200671a4c30bSSean Christopherson 
20070fce5623SAvi Kivity 	if (!old || !old->npages) {
2008244893faSSean Christopherson 		change = KVM_MR_CREATE;
2009244893faSSean Christopherson 
2010244893faSSean Christopherson 		/*
2011244893faSSean Christopherson 		 * To simplify KVM internals, the total number of pages across
20120f9bdef3SSean Christopherson 		 * all memslots must fit in an unsigned long.
2013244893faSSean Christopherson 		 */
2014244893faSSean Christopherson 		if ((kvm->nr_memslot_pages + npages) < kvm->nr_memslot_pages)
2015244893faSSean Christopherson 			return -EINVAL;
2016244893faSSean Christopherson 	} else { /* Modify an existing slot. */
2017244893faSSean Christopherson 		if ((mem->userspace_addr != old->userspace_addr) ||
2018244893faSSean Christopherson 		    (npages != old->npages) ||
2019244893faSSean Christopherson 		    ((mem->flags ^ old->flags) & KVM_MEM_READONLY))
2020244893faSSean Christopherson 			return -EINVAL;
202171a4c30bSSean Christopherson 
2022244893faSSean Christopherson 		if (base_gfn != old->base_gfn)
20230fce5623SAvi Kivity 			change = KVM_MR_MOVE;
20240fce5623SAvi Kivity 		else if (mem->flags != old->flags)
20250fce5623SAvi Kivity 			change = KVM_MR_FLAGS_ONLY;
20260fce5623SAvi Kivity 		else /* Nothing to change. */
20270fce5623SAvi Kivity 			return 0;
202809170a49SPaolo Bonzini 	}
20290fce5623SAvi Kivity 
20300fce5623SAvi Kivity 	if ((change == KVM_MR_CREATE || change == KVM_MR_MOVE) &&
20310fce5623SAvi Kivity 	    kvm_check_memslot_overlap(slots, id, base_gfn, base_gfn + npages))
203279fac95eSMarcelo Tosatti 		return -EEXIST;
203347ae31e2STakuya Yoshikawa 
203479fac95eSMarcelo Tosatti 	/* Allocate a slot that will persist in the memslot. */
20350fce5623SAvi Kivity 	new = kzalloc(sizeof(*new), GFP_KERNEL_ACCOUNT);
20360fce5623SAvi Kivity 	if (!new)
20370fce5623SAvi Kivity 		return -ENOMEM;
20380fce5623SAvi Kivity 
20397940876eSStephen Hemminger 	new->as_id = as_id;
204047ae31e2STakuya Yoshikawa 	new->id = id;
20410fce5623SAvi Kivity 	new->base_gfn = base_gfn;
2042f481b069SPaolo Bonzini 	new->npages = npages;
20430fce5623SAvi Kivity 	new->flags = mem->flags;
204409170a49SPaolo Bonzini 	new->userspace_addr = mem->userspace_addr;
204547ae31e2STakuya Yoshikawa 
20460fce5623SAvi Kivity 	r = kvm_set_memslot(kvm, old, new, change);
20470fce5623SAvi Kivity 	if (r)
20480dff0846SSean Christopherson 		kfree(new);
20492a49f61dSSean Christopherson 	return r;
20502a49f61dSSean Christopherson }
20512a49f61dSSean Christopherson EXPORT_SYMBOL_GPL(__kvm_set_memory_region);
20522a49f61dSSean Christopherson 
20532a49f61dSSean Christopherson int kvm_set_memory_region(struct kvm *kvm,
20542a49f61dSSean Christopherson 			  const struct kvm_userspace_memory_region *mem)
20552a49f61dSSean Christopherson {
20562a49f61dSSean Christopherson 	int r;
20572a49f61dSSean Christopherson 
20580fce5623SAvi Kivity 	mutex_lock(&kvm->slots_lock);
20599f6b8029SPaolo Bonzini 	r = __kvm_set_memory_region(kvm, mem);
2060843574a3SMarkus Elfring 	mutex_unlock(&kvm->slots_lock);
206187bf6e7dSTakuya Yoshikawa 	return r;
20620fce5623SAvi Kivity }
20630fce5623SAvi Kivity EXPORT_SYMBOL_GPL(kvm_set_memory_region);
206486bdf3ebSGavin Shan 
206586bdf3ebSGavin Shan static int kvm_vm_ioctl_set_memory_region(struct kvm *kvm,
2066b2cc64c4SPeter Xu 					  struct kvm_userspace_memory_region *mem)
2067b2cc64c4SPeter Xu {
20682a49f61dSSean Christopherson 	if ((u16)mem->slot >= KVM_USER_MEM_SLOTS)
20692a49f61dSSean Christopherson 		return -EINVAL;
20702a49f61dSSean Christopherson 
2071f481b069SPaolo Bonzini 	return kvm_set_memory_region(kvm, mem);
2072f481b069SPaolo Bonzini }
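/*
 * Illustrative userspace sketch (hypothetical, not part of the original
 * file): the path above is reached via the KVM_SET_USER_MEMORY_REGION ioctl
 * on a VM fd.  "vm_fd" and "backing" are assumed to exist; addresses and
 * sizes must be page aligned, the address space id lives in the upper 16
 * bits of "slot", and memory_size == 0 deletes an existing slot.
 *
 *	struct kvm_userspace_memory_region region = {
 *		.slot            = 0,
 *		.flags           = KVM_MEM_LOG_DIRTY_PAGES,
 *		.guest_phys_addr = 0x100000,
 *		.memory_size     = 0x200000,
 *		.userspace_addr  = (__u64)(unsigned long)backing,
 *	};
 *
 *	if (ioctl(vm_fd, KVM_SET_USER_MEMORY_REGION, &region))
 *		...;	// e.g. -EEXIST on overlap, -EINVAL on bad alignment
 */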
2073f481b069SPaolo Bonzini 
2074843574a3SMarkus Elfring #ifndef CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT
20750fce5623SAvi Kivity /**
2076f481b069SPaolo Bonzini  * kvm_get_dirty_log - get a snapshot of dirty pages
20772a49f61dSSean Christopherson  * @kvm:	pointer to kvm instance
20780577d1abSSean Christopherson  * @log:	slot id and address to which we copy the log
2079843574a3SMarkus Elfring  * @is_dirty:	set to '1' if any dirty pages were found
20800fce5623SAvi Kivity  * @memslot:	set to the associated memslot, always valid on success
20812a49f61dSSean Christopherson  */
20822a49f61dSSean Christopherson int kvm_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log,
20832a49f61dSSean Christopherson 		      int *is_dirty, struct kvm_memory_slot **memslot)
20840fce5623SAvi Kivity {
20850fce5623SAvi Kivity 	struct kvm_memslots *slots;
20862a49f61dSSean Christopherson 	int i, as_id, id;
20870fce5623SAvi Kivity 	unsigned long n;
20882a49f61dSSean Christopherson 	unsigned long any = 0;
2089843574a3SMarkus Elfring 
20900fce5623SAvi Kivity 	/* Dirty ring tracking may be exclusive to dirty log tracking */
20910fce5623SAvi Kivity 	if (!kvm_use_dirty_bitmap(kvm))
20920fce5623SAvi Kivity 		return -ENXIO;
2093843574a3SMarkus Elfring 
20940fce5623SAvi Kivity 	*memslot = NULL;
20952ba9f0d8SAneesh Kumar K.V 	*is_dirty = 0;
20960fce5623SAvi Kivity 
20970dff0846SSean Christopherson 	as_id = log->slot >> 16;
2098ba0513b5SMario Smarduch 	id = (u16)log->slot;
2099b8b00220SJiang Biao 	if (as_id >= KVM_ADDRESS_SPACE_NUM || id >= KVM_USER_MEM_SLOTS)
21002a31b9dbSPaolo Bonzini 		return -EINVAL;
2101ba0513b5SMario Smarduch 
2102ba0513b5SMario Smarduch 	slots = __kvm_memslots(kvm, as_id);
2103ba0513b5SMario Smarduch 	*memslot = id_to_memslot(slots, id);
2104ba0513b5SMario Smarduch 	if (!(*memslot) || !(*memslot)->dirty_bitmap)
2105ba0513b5SMario Smarduch 		return -ENOENT;
2106ba0513b5SMario Smarduch 
2107ba0513b5SMario Smarduch 	kvm_arch_sync_dirty_log(kvm, *memslot);
2108ba0513b5SMario Smarduch 
2109ba0513b5SMario Smarduch 	n = kvm_dirty_bitmap_bytes(*memslot);
2110ba0513b5SMario Smarduch 
2111ba0513b5SMario Smarduch 	for (i = 0; !any && i < n/sizeof(long); ++i)
2112ba0513b5SMario Smarduch 		any = (*memslot)->dirty_bitmap[i];
2113ba0513b5SMario Smarduch 
2114ba0513b5SMario Smarduch 	if (copy_to_user(log->dirty_bitmap, (*memslot)->dirty_bitmap, n))
2115ba0513b5SMario Smarduch 		return -EFAULT;
2116ba0513b5SMario Smarduch 
2117ba0513b5SMario Smarduch 	if (any)
2118ba0513b5SMario Smarduch 		*is_dirty = 1;
21190dff0846SSean Christopherson 	return 0;
2120ba0513b5SMario Smarduch }
21219f6b8029SPaolo Bonzini EXPORT_SYMBOL_GPL(kvm_get_dirty_log);
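/*
 * Illustrative userspace sketch (hypothetical, not part of the original
 * file): the snapshot above is requested with the KVM_GET_DIRTY_LOG ioctl on
 * a VM fd.  "vm_fd" and "bitmap" are assumed; the buffer needs one bit per
 * page of the slot, rounded up to a multiple of sizeof(long) bytes as
 * computed by kvm_dirty_bitmap_bytes().
 *
 *	struct kvm_dirty_log log = {
 *		.slot         = 0,
 *		.dirty_bitmap = bitmap,
 *	};
 *
 *	if (ioctl(vm_fd, KVM_GET_DIRTY_LOG, &log))
 *		...;	// -ENOENT if the slot has no dirty bitmap
 */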
2122ba0513b5SMario Smarduch 
212358d6db34SMarkus Elfring #else /* CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT */
2124ba0513b5SMario Smarduch /**
2125ba0513b5SMario Smarduch  * kvm_get_dirty_log_protect - get a snapshot of dirty pages
2126ba0513b5SMario Smarduch  *	and reenable dirty page tracking for the corresponding pages.
21270dff0846SSean Christopherson  * @kvm:	pointer to kvm instance
2128ba0513b5SMario Smarduch  * @log:	slot id and address to which we copy the log
212986bdf3ebSGavin Shan  *
213086bdf3ebSGavin Shan  * We need to keep in mind that VCPU threads can write to the bitmap
2131b2cc64c4SPeter Xu  * concurrently. So, to avoid losing track of dirty pages we keep the
2132b2cc64c4SPeter Xu  * following order:
2133f481b069SPaolo Bonzini  *
2134f481b069SPaolo Bonzini  *    1. Take a snapshot of the bit and clear it if needed.
2135f481b069SPaolo Bonzini  *    2. Write protect the corresponding page.
213658d6db34SMarkus Elfring  *    3. Copy the snapshot to the userspace.
2137ba0513b5SMario Smarduch  *    4. Upon return caller flushes TLB's if needed.
2138f481b069SPaolo Bonzini  *
2139f481b069SPaolo Bonzini  * Between 2 and 4, the guest may write to the page using the remaining TLB
21400577d1abSSean Christopherson  * entry.  This is not a problem because the page is reported dirty using
21410577d1abSSean Christopherson  * the snapshot taken before and step 4 ensures that writes done after
2142ba0513b5SMario Smarduch  * exiting to userspace will be logged for the next call.
2143ba0513b5SMario Smarduch  *
2144ba0513b5SMario Smarduch  */
21450dff0846SSean Christopherson static int kvm_get_dirty_log_protect(struct kvm *kvm, struct kvm_dirty_log *log)
21460dff0846SSean Christopherson {
2147ba0513b5SMario Smarduch 	struct kvm_memslots *slots;
21480dff0846SSean Christopherson 	struct kvm_memory_slot *memslot;
21492a31b9dbSPaolo Bonzini 	int i, as_id, id;
21502a31b9dbSPaolo Bonzini 	unsigned long n;
21512a31b9dbSPaolo Bonzini 	unsigned long *dirty_bitmap;
21522a31b9dbSPaolo Bonzini 	unsigned long *dirty_bitmap_buffer;
21532a31b9dbSPaolo Bonzini 	bool flush;
21542a31b9dbSPaolo Bonzini 
21552a31b9dbSPaolo Bonzini 	/* Dirty ring tracking may be exclusive to dirty log tracking */
21562a31b9dbSPaolo Bonzini 	if (!kvm_use_dirty_bitmap(kvm))
21572a31b9dbSPaolo Bonzini 		return -ENXIO;
21582a31b9dbSPaolo Bonzini 
21592a31b9dbSPaolo Bonzini 	as_id = log->slot >> 16;
216003133347SClaudio Imbrenda 	id = (u16)log->slot;
2161ba0513b5SMario Smarduch 	if (as_id >= KVM_ADDRESS_SPACE_NUM || id >= KVM_USER_MEM_SLOTS)
2162ba0513b5SMario Smarduch 		return -EINVAL;
2163531810caSBen Gardon 
2164ba0513b5SMario Smarduch 	slots = __kvm_memslots(kvm, as_id);
2165ba0513b5SMario Smarduch 	memslot = id_to_memslot(slots, id);
2166ba0513b5SMario Smarduch 	if (!memslot || !memslot->dirty_bitmap)
2167ba0513b5SMario Smarduch 		return -ENOENT;
2168ba0513b5SMario Smarduch 
2169ba0513b5SMario Smarduch 	dirty_bitmap = memslot->dirty_bitmap;
2170ba0513b5SMario Smarduch 
21710dff0846SSean Christopherson 	kvm_arch_sync_dirty_log(kvm, memslot);
2172ba0513b5SMario Smarduch 
2173ba0513b5SMario Smarduch 	n = kvm_dirty_bitmap_bytes(memslot);
2174ba0513b5SMario Smarduch 	flush = false;
2175ba0513b5SMario Smarduch 	if (kvm->manual_dirty_log_protect) {
217658d2930fSTakuya Yoshikawa 		/*
217758d2930fSTakuya Yoshikawa 		 * Unlike kvm_get_dirty_log, we always return false in *flush,
217858d2930fSTakuya Yoshikawa 		 * because no flush is needed until KVM_CLEAR_DIRTY_LOG.  There
2179531810caSBen Gardon 		 * is some code duplication between this function and
21802a31b9dbSPaolo Bonzini 		 * kvm_get_dirty_log, but hopefully all architectures will
21812a31b9dbSPaolo Bonzini 		 * transition to kvm_get_dirty_log_protect so that
21820dff0846SSean Christopherson 		 * kvm_get_dirty_log can be eliminated.
21830dff0846SSean Christopherson 		 */
21840dff0846SSean Christopherson 		dirty_bitmap_buffer = dirty_bitmap;
2185ba0513b5SMario Smarduch 	} else {
218658d6db34SMarkus Elfring 		dirty_bitmap_buffer = kvm_second_dirty_bitmap(memslot);
218758d6db34SMarkus Elfring 		memset(dirty_bitmap_buffer, 0, n);
2188ba0513b5SMario Smarduch 
21890dff0846SSean Christopherson 		KVM_MMU_LOCK(kvm);
21900dff0846SSean Christopherson 		for (i = 0; i < n / sizeof(long); i++) {
21910dff0846SSean Christopherson 			unsigned long mask;
21920dff0846SSean Christopherson 			gfn_t offset;
21930dff0846SSean Christopherson 
21940dff0846SSean Christopherson 			if (!dirty_bitmap[i])
21950dff0846SSean Christopherson 				continue;
21960dff0846SSean Christopherson 
21970dff0846SSean Christopherson 			flush = true;
21980dff0846SSean Christopherson 			mask = xchg(&dirty_bitmap[i], 0);
21990dff0846SSean Christopherson 			dirty_bitmap_buffer[i] = mask;
22000dff0846SSean Christopherson 
22010dff0846SSean Christopherson 			offset = i * BITS_PER_LONG;
22020dff0846SSean Christopherson 			kvm_arch_mmu_enable_log_dirty_pt_masked(kvm, memslot,
22030dff0846SSean Christopherson 								offset, mask);
22040dff0846SSean Christopherson 		}
22050dff0846SSean Christopherson 		KVM_MMU_UNLOCK(kvm);
22060dff0846SSean Christopherson 	}
22070dff0846SSean Christopherson 
22080dff0846SSean Christopherson 	if (flush)
22090dff0846SSean Christopherson 		kvm_flush_remote_tlbs_memslot(kvm, memslot);
22100dff0846SSean Christopherson 
22110dff0846SSean Christopherson 	if (copy_to_user(log->dirty_bitmap, dirty_bitmap_buffer, n))
22120dff0846SSean Christopherson 		return -EFAULT;
22130dff0846SSean Christopherson 	return 0;
22140dff0846SSean Christopherson }
22150dff0846SSean Christopherson 
22160dff0846SSean Christopherson 
22170dff0846SSean Christopherson /**
22180dff0846SSean Christopherson  * kvm_vm_ioctl_get_dirty_log - get and clear the log of dirty pages in a slot
22190dff0846SSean Christopherson  * @kvm: kvm instance
22200dff0846SSean Christopherson  * @log: slot id and address to which we copy the log
22210dff0846SSean Christopherson  *
22222a31b9dbSPaolo Bonzini  * Steps 1-4 below provide general overview of dirty page logging. See
22232a31b9dbSPaolo Bonzini  * kvm_get_dirty_log_protect() function description for additional details.
22242a31b9dbSPaolo Bonzini  *
22252a31b9dbSPaolo Bonzini  * We call kvm_get_dirty_log_protect() to handle steps 1-3; upon return we
22262a31b9dbSPaolo Bonzini  * always flush the TLB (step 4) even if a previous step failed and the dirty
22272a31b9dbSPaolo Bonzini  * bitmap may be corrupt. Regardless of the previous outcome, the KVM logging
22282a31b9dbSPaolo Bonzini  * API does not preclude a subsequent userspace dirty log read. Flushing the
22290dff0846SSean Christopherson  * TLB ensures writes will be marked dirty for the next log read.
22300dff0846SSean Christopherson  *
22312a31b9dbSPaolo Bonzini  *   1. Take a snapshot of the bit and clear it if needed.
22322a31b9dbSPaolo Bonzini  *   2. Write protect the corresponding page.
22332a31b9dbSPaolo Bonzini  *   3. Copy the snapshot to the userspace.
223498938aa8STomas Bortoli  *   4. Flush TLB's if needed.
22352a31b9dbSPaolo Bonzini  */
223698938aa8STomas Bortoli static int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
22372a31b9dbSPaolo Bonzini 				      struct kvm_dirty_log *log)
22382a31b9dbSPaolo Bonzini {
22390dff0846SSean Christopherson 	int r;
22402a31b9dbSPaolo Bonzini 
224186bdf3ebSGavin Shan 	mutex_lock(&kvm->slots_lock);
224286bdf3ebSGavin Shan 
2243b2cc64c4SPeter Xu 	r = kvm_get_dirty_log_protect(kvm, log);
2244b2cc64c4SPeter Xu 
22452a31b9dbSPaolo Bonzini 	mutex_unlock(&kvm->slots_lock);
22462a31b9dbSPaolo Bonzini 	return r;
22472a31b9dbSPaolo Bonzini }
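/*
 * Illustrative userspace sketch (hypothetical, not part of the original
 * file): kvm->manual_dirty_log_protect, checked in
 * kvm_get_dirty_log_protect() above, is set when userspace enables
 * KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2 on the VM fd.  With it enabled,
 * KVM_GET_DIRTY_LOG only snapshots the bitmap and KVM_CLEAR_DIRTY_LOG
 * (below) performs the write protection.  "vm_fd" is assumed.
 *
 *	struct kvm_enable_cap cap = {
 *		.cap     = KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2,
 *		.args[0] = KVM_DIRTY_LOG_MANUAL_PROTECT_ENABLE,
 *	};
 *
 *	ioctl(vm_fd, KVM_ENABLE_CAP, &cap);
 */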
22482a31b9dbSPaolo Bonzini 
22492a31b9dbSPaolo Bonzini /**
225076d58e0fSPaolo Bonzini  * kvm_clear_dirty_log_protect - clear dirty bits in the bitmap
22512a31b9dbSPaolo Bonzini  *	and reenable dirty page tracking for the corresponding pages.
22522a31b9dbSPaolo Bonzini  * @kvm:	pointer to kvm instance
22532a31b9dbSPaolo Bonzini  * @log:	slot id and address from which to fetch the bitmap of dirty pages
22542a31b9dbSPaolo Bonzini  */
22550577d1abSSean Christopherson static int kvm_clear_dirty_log_protect(struct kvm *kvm,
22560577d1abSSean Christopherson 				       struct kvm_clear_dirty_log *log)
22572a31b9dbSPaolo Bonzini {
22582a31b9dbSPaolo Bonzini 	struct kvm_memslots *slots;
22592a31b9dbSPaolo Bonzini 	struct kvm_memory_slot *memslot;
22604ddc9204SPeter Xu 	int as_id, id;
226198938aa8STomas Bortoli 	gfn_t offset;
226298938aa8STomas Bortoli 	unsigned long i, n;
226376d58e0fSPaolo Bonzini 	unsigned long *dirty_bitmap;
226476d58e0fSPaolo Bonzini 	unsigned long *dirty_bitmap_buffer;
226598938aa8STomas Bortoli 	bool flush;
226698938aa8STomas Bortoli 
22670dff0846SSean Christopherson 	/* Dirty ring tracking may be exclusive to dirty log tracking */
22680dff0846SSean Christopherson 	if (!kvm_use_dirty_bitmap(kvm))
22690dff0846SSean Christopherson 		return -ENXIO;
22702a31b9dbSPaolo Bonzini 
22712a31b9dbSPaolo Bonzini 	as_id = log->slot >> 16;
22722a31b9dbSPaolo Bonzini 	id = (u16)log->slot;
22732a31b9dbSPaolo Bonzini 	if (as_id >= KVM_ADDRESS_SPACE_NUM || id >= KVM_USER_MEM_SLOTS)
2274531810caSBen Gardon 		return -EINVAL;
227553eac7a8SPeter Xu 
227653eac7a8SPeter Xu 	if (log->first_page & 63)
22772a31b9dbSPaolo Bonzini 		return -EINVAL;
22782a31b9dbSPaolo Bonzini 
22792a31b9dbSPaolo Bonzini 	slots = __kvm_memslots(kvm, as_id);
22802a31b9dbSPaolo Bonzini 	memslot = id_to_memslot(slots, id);
22812a31b9dbSPaolo Bonzini 	if (!memslot || !memslot->dirty_bitmap)
22822a31b9dbSPaolo Bonzini 		return -ENOENT;
22832a31b9dbSPaolo Bonzini 
22842a31b9dbSPaolo Bonzini 	dirty_bitmap = memslot->dirty_bitmap;
22852a31b9dbSPaolo Bonzini 
22862a31b9dbSPaolo Bonzini 	n = ALIGN(log->num_pages, BITS_PER_LONG) / 8;
22872a31b9dbSPaolo Bonzini 
22882a31b9dbSPaolo Bonzini 	if (log->first_page > memslot->npages ||
22892a31b9dbSPaolo Bonzini 	    log->num_pages > memslot->npages - log->first_page ||
22902a31b9dbSPaolo Bonzini 	    (log->num_pages < memslot->npages - log->first_page && (log->num_pages & 63)))
22912a31b9dbSPaolo Bonzini 	    return -EINVAL;
22920dff0846SSean Christopherson 
22932a31b9dbSPaolo Bonzini 	kvm_arch_sync_dirty_log(kvm, memslot);
22942a31b9dbSPaolo Bonzini 
22952a31b9dbSPaolo Bonzini 	flush = false;
22962a31b9dbSPaolo Bonzini 	dirty_bitmap_buffer = kvm_second_dirty_bitmap(memslot);
2297531810caSBen Gardon 	if (copy_from_user(dirty_bitmap_buffer, log->dirty_bitmap, n))
22982a31b9dbSPaolo Bonzini 		return -EFAULT;
22990dff0846SSean Christopherson 
23000dff0846SSean Christopherson 	KVM_MMU_LOCK(kvm);
23010dff0846SSean Christopherson 	for (offset = log->first_page, i = offset / BITS_PER_LONG,
23022a31b9dbSPaolo Bonzini 		 n = DIV_ROUND_UP(log->num_pages, BITS_PER_LONG); n--;
23032a31b9dbSPaolo Bonzini 	     i++, offset += BITS_PER_LONG) {
23040dff0846SSean Christopherson 		unsigned long mask = *dirty_bitmap_buffer++;
23050dff0846SSean Christopherson 		atomic_long_t *p = (atomic_long_t *) &dirty_bitmap[i];
23060dff0846SSean Christopherson 		if (!mask)
23070dff0846SSean Christopherson 			continue;
23080dff0846SSean Christopherson 
23090dff0846SSean Christopherson 		mask &= atomic_long_fetch_andnot(mask, p);
23100dff0846SSean Christopherson 
23110dff0846SSean Christopherson 		/*
23120dff0846SSean Christopherson 		 * mask contains the bits that really have been cleared.  This
23130dff0846SSean Christopherson 		 * never includes any bits beyond the length of the memslot (if
23140dff0846SSean Christopherson 		 * the length is not aligned to 64 pages), therefore it is not
23150dff0846SSean Christopherson 		 * a problem if userspace sets them in log->dirty_bitmap.
23160dff0846SSean Christopherson 		*/
23170dff0846SSean Christopherson 		if (mask) {
2318ba0513b5SMario Smarduch 			flush = true;
231949c7754cSGleb Natapov 			kvm_arch_mmu_enable_log_dirty_pt_masked(kvm, memslot,
232049c7754cSGleb Natapov 								offset, mask);
232149c7754cSGleb Natapov 		}
232249c7754cSGleb Natapov 	}
2323a1f4d395SAvi Kivity 	KVM_MMU_UNLOCK(kvm);
23240fce5623SAvi Kivity 
23258e73485cSPaolo Bonzini 	if (flush)
23268e73485cSPaolo Bonzini 		kvm_flush_remote_tlbs_memslot(kvm, memslot);
2327fe22ed82SDavid Matlack 
2328a54d8066SMaciej S. Szmigiero 	return 0;
2329fe22ed82SDavid Matlack }
2330fe22ed82SDavid Matlack 
2331a54d8066SMaciej S. Szmigiero static int kvm_vm_ioctl_clear_dirty_log(struct kvm *kvm,
2332a54d8066SMaciej S. Szmigiero 					struct kvm_clear_dirty_log *log)
2333a54d8066SMaciej S. Szmigiero {
2334a54d8066SMaciej S. Szmigiero 	int r;
2335a54d8066SMaciej S. Szmigiero 
2336a54d8066SMaciej S. Szmigiero 	mutex_lock(&kvm->slots_lock);
2337a54d8066SMaciej S. Szmigiero 
2338a54d8066SMaciej S. Szmigiero 	r = kvm_clear_dirty_log_protect(kvm, log);
2339a54d8066SMaciej S. Szmigiero 
2340a54d8066SMaciej S. Szmigiero 	mutex_unlock(&kvm->slots_lock);
2341fe22ed82SDavid Matlack 	return r;
2342fe22ed82SDavid Matlack }
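/*
 * Illustrative userspace sketch (hypothetical, not part of the original
 * file): after harvesting a snapshot with KVM_GET_DIRTY_LOG, userspace hands
 * the bits it wants re-protected back via KVM_CLEAR_DIRTY_LOG.  Per the
 * checks above, first_page must be 64-aligned and num_pages a multiple of 64
 * unless the range runs to the end of the slot.  "vm_fd" and "bitmap" are
 * assumed.
 *
 *	struct kvm_clear_dirty_log clear = {
 *		.slot         = 0,
 *		.first_page   = 0,
 *		.num_pages    = 512,
 *		.dirty_bitmap = bitmap,
 *	};
 *
 *	ioctl(vm_fd, KVM_CLEAR_DIRTY_LOG, &clear);
 */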
2343fe22ed82SDavid Matlack #endif /* CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT */
2344fe22ed82SDavid Matlack 
2345fe22ed82SDavid Matlack struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn)
2346fe22ed82SDavid Matlack {
2347a54d8066SMaciej S. Szmigiero 	return __gfn_to_memslot(kvm_memslots(kvm), gfn);
2348fe22ed82SDavid Matlack }
2349a54d8066SMaciej S. Szmigiero EXPORT_SYMBOL_GPL(gfn_to_memslot);
2350fe22ed82SDavid Matlack 
2351a54d8066SMaciej S. Szmigiero struct kvm_memory_slot *kvm_vcpu_gfn_to_memslot(struct kvm_vcpu *vcpu, gfn_t gfn)
2352fe22ed82SDavid Matlack {
2353fe22ed82SDavid Matlack 	struct kvm_memslots *slots = kvm_vcpu_memslots(vcpu);
2354fe22ed82SDavid Matlack 	u64 gen = slots->generation;
2355fe22ed82SDavid Matlack 	struct kvm_memory_slot *slot;
23568e73485cSPaolo Bonzini 
23578e73485cSPaolo Bonzini 	/*
235833e94154SYaowei Bai 	 * This also protects against using a memslot from a different address space,
23590fce5623SAvi Kivity 	 * since different address spaces have different generation numbers.
2360bf3e05bcSXiao Guangrong 	 */
23610fce5623SAvi Kivity 	if (unlikely(gen != vcpu->last_used_slot_gen)) {
2362c36b7150SPaolo Bonzini 		vcpu->last_used_slot = NULL;
23630fce5623SAvi Kivity 		vcpu->last_used_slot_gen = gen;
23640fce5623SAvi Kivity 	}
23650fce5623SAvi Kivity 
2366995decb6SVitaly Kuznetsov 	slot = try_get_memslot(vcpu->last_used_slot, gfn);
2367995decb6SVitaly Kuznetsov 	if (slot)
2368995decb6SVitaly Kuznetsov 		return slot;
2369995decb6SVitaly Kuznetsov 
2370995decb6SVitaly Kuznetsov 	/*
2371995decb6SVitaly Kuznetsov 	 * Fall back to searching all memslots. We purposely use
2372995decb6SVitaly Kuznetsov 	 * search_memslots() instead of __gfn_to_memslot() to avoid
2373995decb6SVitaly Kuznetsov 	 * thrashing the VM-wide last_used_slot in kvm_memslots.
2374f9b84e19SSean Christopherson 	 */
23758f0b1ab6SJoerg Roedel 	slot = search_memslots(slots, gfn, false);
23768f0b1ab6SJoerg Roedel 	if (slot) {
23778f0b1ab6SJoerg Roedel 		vcpu->last_used_slot = slot;
23788f0b1ab6SJoerg Roedel 		return slot;
23798f0b1ab6SJoerg Roedel 	}
23808f0b1ab6SJoerg Roedel 
238142cde48bSSean Christopherson 	return NULL;
23828f0b1ab6SJoerg Roedel }
23838f0b1ab6SJoerg Roedel 
23848f0b1ab6SJoerg Roedel bool kvm_is_visible_gfn(struct kvm *kvm, gfn_t gfn)
2385d8ed45c5SMichel Lespinasse {
23868f0b1ab6SJoerg Roedel 	struct kvm_memory_slot *memslot = gfn_to_memslot(kvm, gfn);
23878f0b1ab6SJoerg Roedel 
23888f0b1ab6SJoerg Roedel 	return kvm_is_visible_memslot(memslot);
23898f0b1ab6SJoerg Roedel }
23908f0b1ab6SJoerg Roedel EXPORT_SYMBOL_GPL(kvm_is_visible_gfn);
23918f0b1ab6SJoerg Roedel 
23928f0b1ab6SJoerg Roedel bool kvm_vcpu_is_visible_gfn(struct kvm_vcpu *vcpu, gfn_t gfn)
2393d8ed45c5SMichel Lespinasse {
23948f0b1ab6SJoerg Roedel 	struct kvm_memory_slot *memslot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
23958f0b1ab6SJoerg Roedel 
23968f0b1ab6SJoerg Roedel 	return kvm_is_visible_memslot(memslot);
23978f0b1ab6SJoerg Roedel }
23988283e36aSBen Gardon EXPORT_SYMBOL_GPL(kvm_vcpu_is_visible_gfn);
23994d8b81abSXiao Guangrong 
24004d8b81abSXiao Guangrong unsigned long kvm_host_page_size(struct kvm_vcpu *vcpu, gfn_t gfn)
24014d8b81abSXiao Guangrong {
24024d8b81abSXiao Guangrong 	struct vm_area_struct *vma;
24038283e36aSBen Gardon 	unsigned long addr, size;
24044d8b81abSXiao Guangrong 
24050fce5623SAvi Kivity 	size = PAGE_SIZE;
2406bc6678a3SMarcelo Tosatti 
2407ca3a490cSXiao Guangrong 	addr = kvm_vcpu_gfn_to_hva_prot(vcpu, gfn, NULL);
240848987781SXiao Guangrong 	if (kvm_is_error_hva(addr))
24094d8b81abSXiao Guangrong 		return PAGE_SIZE;
24104d8b81abSXiao Guangrong 
241148987781SXiao Guangrong 	mmap_read_lock(current->mm);
241248987781SXiao Guangrong 	vma = find_vma(current->mm, addr);
241348987781SXiao Guangrong 	if (!vma)
241448987781SXiao Guangrong 		goto out;
24154d8b81abSXiao Guangrong 
24160fce5623SAvi Kivity 	size = vma_kernel_pagesize(vma);
241748987781SXiao Guangrong 
24184d8b81abSXiao Guangrong out:
24194d8b81abSXiao Guangrong 	mmap_read_unlock(current->mm);
24204d8b81abSXiao Guangrong 
24214d8b81abSXiao Guangrong 	return size;
24224d8b81abSXiao Guangrong }
24234d8b81abSXiao Guangrong 
24244d8b81abSXiao Guangrong static bool memslot_is_readonly(const struct kvm_memory_slot *slot)
24254d8b81abSXiao Guangrong {
24264d8b81abSXiao Guangrong 	return slot->flags & KVM_MEM_READONLY;
24274d8b81abSXiao Guangrong }
24284d8b81abSXiao Guangrong 
24294d8b81abSXiao Guangrong static unsigned long __gfn_to_hva_many(const struct kvm_memory_slot *slot, gfn_t gfn,
24304d8b81abSXiao Guangrong 				       gfn_t *nr_pages, bool write)
243148987781SXiao Guangrong {
243248987781SXiao Guangrong 	if (!slot || slot->flags & KVM_MEMSLOT_INVALID)
243349c7754cSGleb Natapov 		return KVM_HVA_ERR_BAD;
243448987781SXiao Guangrong 
24350d150298SSheng Yang 	if (memslot_is_readonly(slot) && write)
24360fce5623SAvi Kivity 		return KVM_HVA_ERR_RO_BAD;
24378e73485cSPaolo Bonzini 
24388e73485cSPaolo Bonzini 	if (nr_pages)
24398e73485cSPaolo Bonzini 		*nr_pages = slot->npages - (gfn - slot->base_gfn);
24408e73485cSPaolo Bonzini 
24418e73485cSPaolo Bonzini 	return __gfn_to_hva_memslot(slot, gfn);
24428e73485cSPaolo Bonzini }
244386ab8cffSXiao Guangrong 
2444970c0d4bSWei Yang static unsigned long gfn_to_hva_many(struct kvm_memory_slot *slot, gfn_t gfn,
2445970c0d4bSWei Yang 				     gfn_t *nr_pages)
2446970c0d4bSWei Yang {
2447970c0d4bSWei Yang 	return __gfn_to_hva_many(slot, gfn, nr_pages, true);
2448970c0d4bSWei Yang }
2449970c0d4bSWei Yang 
245086ab8cffSXiao Guangrong unsigned long gfn_to_hva_memslot(struct kvm_memory_slot *slot,
245164d83126SChristoffer Dall 					gfn_t gfn)
245264d83126SChristoffer Dall {
24538030089fSGleb Natapov 	return gfn_to_hva_many(slot, gfn, NULL);
2454a2ac07feSGleb Natapov }
2455a2ac07feSGleb Natapov EXPORT_SYMBOL_GPL(gfn_to_hva_memslot);
2456a2ac07feSGleb Natapov 
2457ba6a3541SPaolo Bonzini unsigned long gfn_to_hva(struct kvm *kvm, gfn_t gfn)
2458ba6a3541SPaolo Bonzini {
2459a2ac07feSGleb Natapov 	return gfn_to_hva_many(gfn_to_memslot(kvm, gfn), gfn, NULL);
246086ab8cffSXiao Guangrong }
246186ab8cffSXiao Guangrong EXPORT_SYMBOL_GPL(gfn_to_hva);
246264d83126SChristoffer Dall 
246364d83126SChristoffer Dall unsigned long kvm_vcpu_gfn_to_hva(struct kvm_vcpu *vcpu, gfn_t gfn)
246464d83126SChristoffer Dall {
246564d83126SChristoffer Dall 	return gfn_to_hva_many(kvm_vcpu_gfn_to_memslot(vcpu, gfn), gfn, NULL);
246664d83126SChristoffer Dall }
246764d83126SChristoffer Dall EXPORT_SYMBOL_GPL(kvm_vcpu_gfn_to_hva);
246864d83126SChristoffer Dall 
24698e73485cSPaolo Bonzini /*
24708e73485cSPaolo Bonzini  * Return the hva of a @gfn and the R/W attribute if possible.
24718e73485cSPaolo Bonzini  *
24728e73485cSPaolo Bonzini  * @slot: the kvm_memory_slot which contains @gfn
24738e73485cSPaolo Bonzini  * @gfn: the gfn to be translated
24748e73485cSPaolo Bonzini  * @writable: used to return the read/write attribute of the @slot if the hva
24758e73485cSPaolo Bonzini  * is valid and @writable is not NULL
2476fafc3dbaSHuang Ying  */
2477fafc3dbaSHuang Ying unsigned long gfn_to_hva_memslot_prot(struct kvm_memory_slot *slot,
24780d731759SLorenzo Stoakes 				      gfn_t gfn, bool *writable)
2479fafc3dbaSHuang Ying {
248054d02069SLorenzo Stoakes 	unsigned long hva = __gfn_to_hva_many(slot, gfn, NULL, false);
2481fafc3dbaSHuang Ying 
2482fafc3dbaSHuang Ying 	if (!kvm_is_error_hva(hva) && writable)
2483fafc3dbaSHuang Ying 		*writable = !memslot_is_readonly(slot);
24842fc84311SXiao Guangrong 
2485b9b33da2SPaolo Bonzini 	return hva;
2486b9b33da2SPaolo Bonzini }
2487311497e0SMiaohe Lin 
24882fc84311SXiao Guangrong unsigned long gfn_to_hva_prot(struct kvm *kvm, gfn_t gfn, bool *writable)
2489b9b33da2SPaolo Bonzini {
2490b9b33da2SPaolo Bonzini 	struct kvm_memory_slot *slot = gfn_to_memslot(kvm, gfn);
24910fce5623SAvi Kivity 
24920fce5623SAvi Kivity 	return gfn_to_hva_memslot_prot(slot, gfn, writable);
24930fce5623SAvi Kivity }
249412ce13feSXiao Guangrong 
249512ce13feSXiao Guangrong unsigned long kvm_vcpu_gfn_to_hva_prot(struct kvm_vcpu *vcpu, gfn_t gfn, bool *writable)
249612ce13feSXiao Guangrong {
249712ce13feSXiao Guangrong 	struct kvm_memory_slot *slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
249812ce13feSXiao Guangrong 
249912ce13feSXiao Guangrong 	return gfn_to_hva_memslot_prot(slot, gfn, writable);
250012ce13feSXiao Guangrong }
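/*
 * Illustrative sketch (hypothetical, not part of the original file): a
 * typical consumer of the hva helpers above translates a gfn, checks for the
 * error hva, and only then dereferences the mapping ("gfn" and "want_write"
 * are assumed):
 *
 *	bool writable;
 *	unsigned long hva = gfn_to_hva_prot(kvm, gfn, &writable);
 *
 *	if (kvm_is_error_hva(hva))
 *		return -EFAULT;
 *	if (want_write && !writable)
 *		return -EPERM;		// hypothetical caller policy
 *	// ... access the page through uaccess helpers on "hva" ...
 */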
250112ce13feSXiao Guangrong 
2502dadbb612SSouptick Joarder static inline int check_user_page_hwpoison(unsigned long addr)
25032fc84311SXiao Guangrong {
2504612819c3SMarcelo Tosatti 	int rc, flags = FOLL_HWPOISON | FOLL_WRITE;
2505612819c3SMarcelo Tosatti 
2506612819c3SMarcelo Tosatti 	rc = get_user_pages(addr, 1, flags, NULL);
25072fc84311SXiao Guangrong 	return rc == -EHWPOISON;
25082fc84311SXiao Guangrong }
2509612819c3SMarcelo Tosatti 
25102fc84311SXiao Guangrong /*
25112fc84311SXiao Guangrong  * The fast path to get the writable pfn which will be stored in @pfn,
2512af585b92SGleb Natapov  * true indicates success, otherwise false is returned.  It's also the
25132fc84311SXiao Guangrong  * only part that runs if we can in atomic context.
25142fc84311SXiao Guangrong  */
25152fc84311SXiao Guangrong static bool hva_to_pfn_fast(unsigned long addr, bool write_fault,
25162fc84311SXiao Guangrong 			    bool *writable, kvm_pfn_t *pfn)
25172fc84311SXiao Guangrong {
2518c8b88b33SPeter Xu 	struct page *page[1];
25192fc84311SXiao Guangrong 
2520*b1e1296dSDavid Hildenbrand 	/*
2521*b1e1296dSDavid Hildenbrand 	 * Fast pin a writable pfn only if it is a write fault request
2522*b1e1296dSDavid Hildenbrand 	 * or the caller allows to map a writable pfn for a read fault
2523*b1e1296dSDavid Hildenbrand 	 * request.
2524*b1e1296dSDavid Hildenbrand 	 */
2525*b1e1296dSDavid Hildenbrand 	if (!(write_fault || writable))
2526*b1e1296dSDavid Hildenbrand 		return false;
2527*b1e1296dSDavid Hildenbrand 
2528*b1e1296dSDavid Hildenbrand 	if (get_user_page_fast_only(addr, FOLL_WRITE, page)) {
2529*b1e1296dSDavid Hildenbrand 		*pfn = page_to_pfn(page[0]);
2530*b1e1296dSDavid Hildenbrand 
2531*b1e1296dSDavid Hildenbrand 		if (writable)
2532ce53053cSAl Viro 			*writable = true;
253328249139SLi kunyu 		return true;
25342fc84311SXiao Guangrong 	}
25350fce5623SAvi Kivity 
2536612819c3SMarcelo Tosatti 	return false;
2537612819c3SMarcelo Tosatti }
2538612819c3SMarcelo Tosatti 
2539612819c3SMarcelo Tosatti /*
2540d4944b0eSLorenzo Stoakes  * The slow path to get the pfn of the specified host virtual address,
2541d4944b0eSLorenzo Stoakes  * 1 indicates success, -errno is returned if an error is detected.
2542ce53053cSAl Viro  */
2543ce53053cSAl Viro static int hva_to_pfn_slow(unsigned long addr, bool *async, bool write_fault,
2544c8b88b33SPeter Xu 			   bool interruptible, bool *writable, kvm_pfn_t *pfn)
2545c8b88b33SPeter Xu {
2546d4944b0eSLorenzo Stoakes 	/*
2547ce53053cSAl Viro 	 * When a VCPU accesses a page that is not mapped into the secondary
25482fc84311SXiao Guangrong 	 * MMU, we lookup the page using GUP to map it, so the guest VCPU can
25492fc84311SXiao Guangrong 	 * make progress. We always want to honor NUMA hinting faults in that
2550612819c3SMarcelo Tosatti 	 * case, because GUP usage corresponds to memory accesses from the VCPU.
2551612819c3SMarcelo Tosatti 	 * Otherwise, we'd not trigger NUMA hinting faults once a page is
255212ce13feSXiao Guangrong 	 * mapped into the secondary MMU and gets accessed by a VCPU.
2553ce53053cSAl Viro 	 *
2554612819c3SMarcelo Tosatti 	 * Note that get_user_page_fast_only() and FOLL_WRITE for now
2555dadbb612SSouptick Joarder 	 * implicitly honor NUMA hinting faults and don't need this flag.
2556612819c3SMarcelo Tosatti 	 */
2557ce53053cSAl Viro 	unsigned int flags = FOLL_HWPOISON | FOLL_HONOR_NUMA_FAULT;
2558ce53053cSAl Viro 	struct page *page;
2559612819c3SMarcelo Tosatti 	int npages;
2560612819c3SMarcelo Tosatti 
2561ce53053cSAl Viro 	might_sleep();
25622fc84311SXiao Guangrong 
2563887c08acSXiao Guangrong 	if (writable)
25640fce5623SAvi Kivity 		*writable = write_fault;
25654d8b81abSXiao Guangrong 
25664d8b81abSXiao Guangrong 	if (write_fault)
25674d8b81abSXiao Guangrong 		flags |= FOLL_WRITE;
25684d8b81abSXiao Guangrong 	if (async)
25694d8b81abSXiao Guangrong 		flags |= FOLL_NOWAIT;
25704d8b81abSXiao Guangrong 	if (interruptible)
25714d8b81abSXiao Guangrong 		flags |= FOLL_INTERRUPTIBLE;
25724d8b81abSXiao Guangrong 
25734d8b81abSXiao Guangrong 	npages = get_user_pages_unlocked(addr, 1, &page, flags);
25744d8b81abSXiao Guangrong 	if (npages != 1)
25754d8b81abSXiao Guangrong 		return npages;
2576f8be156bSNicholas Piggin 
2577f8be156bSNicholas Piggin 	/* map read fault as writable if possible */
2578b14b2690SSean Christopherson 	if (unlikely(!write_fault) && writable) {
2579b14b2690SSean Christopherson 		struct page *wpage;
2580b14b2690SSean Christopherson 
2581f8be156bSNicholas Piggin 		if (get_user_page_fast_only(addr, FOLL_WRITE, &wpage)) {
2582b14b2690SSean Christopherson 			*writable = true;
2583b14b2690SSean Christopherson 			put_page(page);
2584f8be156bSNicholas Piggin 			page = wpage;
2585f8be156bSNicholas Piggin 		}
258692176a8eSPaolo Bonzini 	}
25871625566eSXianting Tian 	*pfn = page_to_pfn(page);
25881625566eSXianting Tian 	return npages;
258992176a8eSPaolo Bonzini }
2590a9545779SSean Christopherson 
2591bd2fae8dSPaolo Bonzini static bool vma_is_valid(struct vm_area_struct *vma, bool write_fault)
2592c33c7948SRyan Roberts {
2593bd2fae8dSPaolo Bonzini 	if (unlikely(!(vma->vm_flags & VM_READ)))
2594add6a0cdSPaolo Bonzini 		return false;
2595add6a0cdSPaolo Bonzini 
25969fd6dad1SPaolo Bonzini 	if (write_fault && (unlikely(!(vma->vm_flags & VM_WRITE))))
2597add6a0cdSPaolo Bonzini 		return false;
2598add6a0cdSPaolo Bonzini 
2599add6a0cdSPaolo Bonzini 	return true;
2600add6a0cdSPaolo Bonzini }
2601add6a0cdSPaolo Bonzini 
260264019a2eSPeter Xu static int kvm_try_get_pfn(kvm_pfn_t pfn)
260364019a2eSPeter Xu {
2604add6a0cdSPaolo Bonzini 	struct page *page = kvm_pfn_to_refcounted_page(pfn);
2605add6a0cdSPaolo Bonzini 
2606a8387d0bSPaolo Bonzini 	if (!page)
2607a8387d0bSPaolo Bonzini 		return 1;
2608add6a0cdSPaolo Bonzini 
2609add6a0cdSPaolo Bonzini 	return get_page_unless_zero(page);
2610add6a0cdSPaolo Bonzini }
26119fd6dad1SPaolo Bonzini 
2612add6a0cdSPaolo Bonzini static int hva_to_pfn_remapped(struct vm_area_struct *vma,
2613add6a0cdSPaolo Bonzini 			       unsigned long addr, bool write_fault,
2614bd2fae8dSPaolo Bonzini 			       bool *writable, kvm_pfn_t *p_pfn)
2615add6a0cdSPaolo Bonzini {
2616c33c7948SRyan Roberts 	kvm_pfn_t pfn;
2617c33c7948SRyan Roberts 	pte_t *ptep;
2618c33c7948SRyan Roberts 	pte_t pte;
2619bd2fae8dSPaolo Bonzini 	spinlock_t *ptl;
2620bd2fae8dSPaolo Bonzini 	int r;
2621add6a0cdSPaolo Bonzini 
2622add6a0cdSPaolo Bonzini 	r = follow_pte(vma->vm_mm, addr, &ptep, &ptl);
2623a340b3e2SKarimAllah Ahmed 	if (r) {
2624c33c7948SRyan Roberts 		/*
2625c33c7948SRyan Roberts 		 * get_user_pages fails for VM_IO and VM_PFNMAP vmas and does
2626add6a0cdSPaolo Bonzini 		 * not call the fault handler, so do it here.
2627add6a0cdSPaolo Bonzini 		 */
2628add6a0cdSPaolo Bonzini 		bool unlocked = false;
2629add6a0cdSPaolo Bonzini 		r = fixup_user_fault(current->mm, addr,
2630add6a0cdSPaolo Bonzini 				     (write_fault ? FAULT_FLAG_WRITE : 0),
263136c3ce6cSMarc Zyngier 				     &unlocked);
2632add6a0cdSPaolo Bonzini 		if (unlocked)
2633add6a0cdSPaolo Bonzini 			return -EAGAIN;
2634add6a0cdSPaolo Bonzini 		if (r)
2635add6a0cdSPaolo Bonzini 			return r;
2636add6a0cdSPaolo Bonzini 
2637f8be156bSNicholas Piggin 		r = follow_pte(vma->vm_mm, addr, &ptep, &ptl);
2638f8be156bSNicholas Piggin 		if (r)
2639f8be156bSNicholas Piggin 			return r;
2640f8be156bSNicholas Piggin 	}
2641f8be156bSNicholas Piggin 
2642f8be156bSNicholas Piggin 	pte = ptep_get(ptep);
2643add6a0cdSPaolo Bonzini 
2644f8be156bSNicholas Piggin 	if (write_fault && !pte_write(pte)) {
2645f8be156bSNicholas Piggin 		pfn = KVM_PFN_ERR_RO_FAULT;
2646add6a0cdSPaolo Bonzini 		goto out;
2647bd2fae8dSPaolo Bonzini 	}
2648bd2fae8dSPaolo Bonzini 
2649add6a0cdSPaolo Bonzini 	if (writable)
2650f8be156bSNicholas Piggin 		*writable = pte_write(pte);
2651f8be156bSNicholas Piggin 	pfn = pte_pfn(pte);
265292176a8eSPaolo Bonzini 
265392176a8eSPaolo Bonzini 	/*
265412ce13feSXiao Guangrong 	 * Get a reference here because callers of *hva_to_pfn* and
265512ce13feSXiao Guangrong 	 * *gfn_to_pfn* ultimately call kvm_release_pfn_clean on the
265612ce13feSXiao Guangrong 	 * returned pfn.  This is only needed if the VMA has VM_MIXEDMAP
265712ce13feSXiao Guangrong 	 * set, but the kvm_try_get_pfn/kvm_release_pfn_clean pair will
2658c8b88b33SPeter Xu 	 * simply do nothing for reserved pfns.
265912ce13feSXiao Guangrong 	 *
266012ce13feSXiao Guangrong 	 * Whoever called remap_pfn_range is also going to call e.g.
266112ce13feSXiao Guangrong 	 * unmap_mapping_range before the underlying pages are freed,
266212ce13feSXiao Guangrong 	 * causing a call to our MMU notifier.
266312ce13feSXiao Guangrong 	 *
266412ce13feSXiao Guangrong 	 * Certain IO or PFNMAP mappings can be backed with valid
266512ce13feSXiao Guangrong 	 * struct pages, but be allocated without refcounting e.g.,
266612ce13feSXiao Guangrong 	 * tail pages of non-compound higher order allocations, which
266712ce13feSXiao Guangrong 	 * would then underflow the refcount when the caller does the
266812ce13feSXiao Guangrong 	 * required put_page. Don't allow those pages here.
2669c8b88b33SPeter Xu 	 */
2670c8b88b33SPeter Xu 	if (!kvm_try_get_pfn(pfn))
26712fc84311SXiao Guangrong 		r = -EFAULT;
26722e2e3738SAnthony Liguori 
2673943dfea8SSean Christopherson out:
267492176a8eSPaolo Bonzini 	pte_unmap_unlock(ptep, ptl);
26752fc84311SXiao Guangrong 	*p_pfn = pfn;
26762fc84311SXiao Guangrong 
26772fc84311SXiao Guangrong 	return r;
26782fc84311SXiao Guangrong }
2679b9b33da2SPaolo Bonzini 
26802fc84311SXiao Guangrong /*
26812e2e3738SAnthony Liguori  * Pin guest page in memory and return its pfn.
2682887c08acSXiao Guangrong  * @addr: host virtual address which maps memory to the guest
26836c8ee57bSXiao Guangrong  * @atomic: whether this function can sleep
2684887c08acSXiao Guangrong  * @interruptible: whether the process can be interrupted by non-fatal signals
2685c8b88b33SPeter Xu  * @async: whether this function need to wait IO complete if the
2686c8b88b33SPeter Xu  *         host page is not in the memory
26872fc84311SXiao Guangrong  * @write_fault: whether we should get a writable host page
26882fc84311SXiao Guangrong  * @writable: whether it allows to map a writable host page for !@write_fault
2689fe5ed56cSPeter Xu  *
2690fe5ed56cSPeter Xu  * The function will map a writable host page for these two cases:
26912e2e3738SAnthony Liguori  * 1): @write_fault = true
2692d8ed45c5SMichel Lespinasse  * 2): @write_fault = false && @writable, @writable will tell the caller
26930857b9e9SGleb Natapov  *     whether the mapping is writable.
26940857b9e9SGleb Natapov  */
26952fc84311SXiao Guangrong kvm_pfn_t hva_to_pfn(unsigned long addr, bool atomic, bool interruptible,
26962fc84311SXiao Guangrong 		     bool *async, bool write_fault, bool *writable)
2697bf998156SHuang Ying {
2698bf998156SHuang Ying 	struct vm_area_struct *vma;
2699a8387d0bSPaolo Bonzini 	kvm_pfn_t pfn;
2700fc98c03bSLiam Howlett 	int npages, r;
27014c2155ceSMarcelo Tosatti 
27028030089fSGleb Natapov 	/* we can do it either atomically or asynchronously, not both */
27036c8ee57bSXiao Guangrong 	BUG_ON(atomic && async);
270492176a8eSPaolo Bonzini 
27051625566eSXianting Tian 	if (hva_to_pfn_fast(addr, write_fault, writable, &pfn))
2706a8387d0bSPaolo Bonzini 		return pfn;
2707a8387d0bSPaolo Bonzini 
270892176a8eSPaolo Bonzini 	if (atomic)
270992176a8eSPaolo Bonzini 		return KVM_PFN_ERR_FAULT;
27108030089fSGleb Natapov 
27114d8b81abSXiao Guangrong 	npages = hva_to_pfn_slow(addr, async, write_fault, interruptible,
27128030089fSGleb Natapov 				 writable, &pfn);
27136c8ee57bSXiao Guangrong 	if (npages == 1)
27148030089fSGleb Natapov 		return pfn;
27152fc84311SXiao Guangrong 	if (npages == -EINTR)
2716d8ed45c5SMichel Lespinasse 		return KVM_PFN_ERR_SIGPENDING;
27172e2e3738SAnthony Liguori 
271835149e21SAnthony Liguori 	mmap_read_lock(current->mm);
271935149e21SAnthony Liguori 	if (npages == -EHWPOISON ||
27208283e36aSBen Gardon 	      (!async && check_user_page_hwpoison(addr))) {
2721c8b88b33SPeter Xu 		pfn = KVM_PFN_ERR_HWPOISON;
2722c8b88b33SPeter Xu 		goto exit;
2723887c08acSXiao Guangrong 	}
27244d8b81abSXiao Guangrong 
27254d8b81abSXiao Guangrong retry:
27264a42d848SDavid Stevens 	vma = vma_lookup(current->mm, addr);
27274a42d848SDavid Stevens 
27284a42d848SDavid Stevens 	if (vma == NULL)
2729b2740d35SPaolo Bonzini 		pfn = KVM_PFN_ERR_FAULT;
2730b2740d35SPaolo Bonzini 	else if (vma->vm_flags & (VM_IO | VM_PFNMAP)) {
2731b2740d35SPaolo Bonzini 		r = hva_to_pfn_remapped(vma, addr, write_fault, writable, &pfn);
27324d8b81abSXiao Guangrong 		if (r == -EAGAIN)
2733b2740d35SPaolo Bonzini 			goto retry;
27344d8b81abSXiao Guangrong 		if (r < 0)
2735b2740d35SPaolo Bonzini 			pfn = KVM_PFN_ERR_FAULT;
2736b2740d35SPaolo Bonzini 	} else {
2737b2740d35SPaolo Bonzini 		if (async && vma_is_valid(vma, write_fault))
273881c52c56SXiao Guangrong 			*async = true;
2739b2740d35SPaolo Bonzini 		pfn = KVM_PFN_ERR_FAULT;
27404d8b81abSXiao Guangrong 	}
27414d8b81abSXiao Guangrong exit:
27424d8b81abSXiao Guangrong 	mmap_read_unlock(current->mm);
27434d8b81abSXiao Guangrong 	return pfn;
27444d8b81abSXiao Guangrong }
2745887c08acSXiao Guangrong 
27464d8b81abSXiao Guangrong kvm_pfn_t __gfn_to_pfn_memslot(const struct kvm_memory_slot *slot, gfn_t gfn,
2747c8b88b33SPeter Xu 			       bool atomic, bool interruptible, bool *async,
27484d8b81abSXiao Guangrong 			       bool write_fault, bool *writable, hva_t *hva)
27494d8b81abSXiao Guangrong {
27503520469dSPaolo Bonzini 	unsigned long addr = __gfn_to_hva_many(slot, gfn, NULL, write_fault);
2751887c08acSXiao Guangrong 
2752ba049e93SDan Williams 	if (hva)
2753612819c3SMarcelo Tosatti 		*hva = addr;
2754612819c3SMarcelo Tosatti 
2755c8b88b33SPeter Xu 	if (addr == KVM_HVA_ERR_RO_BAD) {
2756c8b88b33SPeter Xu 		if (writable)
2757612819c3SMarcelo Tosatti 			*writable = false;
2758612819c3SMarcelo Tosatti 		return KVM_PFN_ERR_RO_FAULT;
2759612819c3SMarcelo Tosatti 	}
27608283e36aSBen Gardon 
2761506f0d6fSMarcelo Tosatti 	if (kvm_is_error_hva(addr)) {
2762c8b88b33SPeter Xu 		if (writable)
2763c8b88b33SPeter Xu 			*writable = false;
2764506f0d6fSMarcelo Tosatti 		return KVM_PFN_NOSLOT;
2765e37afc6eSPaolo Bonzini 	}
2766506f0d6fSMarcelo Tosatti 
27678283e36aSBen Gardon 	/* Do not map writable pfn in the readonly memslot. */
2768037d92dcSXiao Guangrong 	if (writable && memslot_is_readonly(slot)) {
2769c8b88b33SPeter Xu 		*writable = false;
2770c8b88b33SPeter Xu 		writable = NULL;
2771037d92dcSXiao Guangrong 	}
2772037d92dcSXiao Guangrong 
2773037d92dcSXiao Guangrong 	return hva_to_pfn(addr, atomic, interruptible, async, write_fault,
2774ba049e93SDan Williams 			  writable);
27758e73485cSPaolo Bonzini }
27768e73485cSPaolo Bonzini EXPORT_SYMBOL_GPL(__gfn_to_pfn_memslot);
27778e73485cSPaolo Bonzini 
27788e73485cSPaolo Bonzini kvm_pfn_t gfn_to_pfn_prot(struct kvm *kvm, gfn_t gfn, bool write_fault,
27798e73485cSPaolo Bonzini 		      bool *writable)
2780ba049e93SDan Williams {
2781e37afc6eSPaolo Bonzini 	return __gfn_to_pfn_memslot(gfn_to_memslot(kvm, gfn), gfn, false, false,
2782e37afc6eSPaolo Bonzini 				    NULL, write_fault, writable, NULL);
2783e37afc6eSPaolo Bonzini }
2784e37afc6eSPaolo Bonzini EXPORT_SYMBOL_GPL(gfn_to_pfn_prot);
2785e37afc6eSPaolo Bonzini 
2786ba049e93SDan Williams kvm_pfn_t gfn_to_pfn_memslot(const struct kvm_memory_slot *slot, gfn_t gfn)
27878e73485cSPaolo Bonzini {
27888e73485cSPaolo Bonzini 	return __gfn_to_pfn_memslot(slot, gfn, false, false, NULL, true,
27898e73485cSPaolo Bonzini 				    NULL, NULL);
27908e73485cSPaolo Bonzini }
27918e73485cSPaolo Bonzini EXPORT_SYMBOL_GPL(gfn_to_pfn_memslot);
2792d9ef13c2SPaolo Bonzini 
2793d9ef13c2SPaolo Bonzini kvm_pfn_t gfn_to_pfn_memslot_atomic(const struct kvm_memory_slot *slot, gfn_t gfn)
279448987781SXiao Guangrong {
279548987781SXiao Guangrong 	return __gfn_to_pfn_memslot(slot, gfn, true, false, NULL, true,
2796076b925dSArnd Bergmann 				    NULL, NULL);
279748987781SXiao Guangrong }
2798d9ef13c2SPaolo Bonzini EXPORT_SYMBOL_GPL(gfn_to_pfn_memslot_atomic);
279948987781SXiao Guangrong 
280048987781SXiao Guangrong kvm_pfn_t kvm_vcpu_gfn_to_pfn_atomic(struct kvm_vcpu *vcpu, gfn_t gfn)
280148987781SXiao Guangrong {
280248987781SXiao Guangrong 	return gfn_to_pfn_memslot_atomic(kvm_vcpu_gfn_to_memslot(vcpu, gfn), gfn);
280348987781SXiao Guangrong }
280448987781SXiao Guangrong EXPORT_SYMBOL_GPL(kvm_vcpu_gfn_to_pfn_atomic);
2805dadbb612SSouptick Joarder 
280648987781SXiao Guangrong kvm_pfn_t gfn_to_pfn(struct kvm *kvm, gfn_t gfn)
280748987781SXiao Guangrong {
280848987781SXiao Guangrong 	return gfn_to_pfn_memslot(gfn_to_memslot(kvm, gfn), gfn);
2809b1624f99SSean Christopherson }
2810b1624f99SSean Christopherson EXPORT_SYMBOL_GPL(gfn_to_pfn);
2811b1624f99SSean Christopherson 
2812b1624f99SSean Christopherson kvm_pfn_t kvm_vcpu_gfn_to_pfn(struct kvm_vcpu *vcpu, gfn_t gfn)
2813b1624f99SSean Christopherson {
2814b1624f99SSean Christopherson 	return gfn_to_pfn_memslot(kvm_vcpu_gfn_to_memslot(vcpu, gfn), gfn);
281535149e21SAnthony Liguori }
281635149e21SAnthony Liguori EXPORT_SYMBOL_GPL(kvm_vcpu_gfn_to_pfn);
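/*
 * Illustrative sketch (hypothetical, not part of the original file): callers
 * of the gfn_to_pfn() family above are expected to check for error pfns and
 * to drop the reference once they are done with the page:
 *
 *	kvm_pfn_t pfn = gfn_to_pfn(kvm, gfn);
 *
 *	if (is_error_noslot_pfn(pfn))
 *		return -EFAULT;
 *	// ... use the pfn ...
 *	kvm_release_pfn_clean(pfn);	// or kvm_release_pfn_dirty() if written
 */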
2817b14b2690SSean Christopherson 
2818ba049e93SDan Williams int gfn_to_page_many_atomic(struct kvm_memory_slot *slot, gfn_t gfn,
28192e2e3738SAnthony Liguori 			    struct page **pages, int nr_pages)
28202e2e3738SAnthony Liguori {
28212e2e3738SAnthony Liguori 	unsigned long addr;
2822c77fb9dcSXiantao Zhang 	gfn_t entry = 0;
28232e2e3738SAnthony Liguori 
28242e2e3738SAnthony Liguori 	addr = gfn_to_hva_many(slot, gfn, &entry);
2825b14b2690SSean Christopherson 	if (kvm_is_error_hva(addr))
2826b14b2690SSean Christopherson 		return -1;
28270fce5623SAvi Kivity 
28280fce5623SAvi Kivity 	if (entry < nr_pages)
2829b14b2690SSean Christopherson 		return 0;
28300fce5623SAvi Kivity 
28310fce5623SAvi Kivity 	return get_user_pages_fast_only(addr, nr_pages, FOLL_WRITE, pages);
28320fce5623SAvi Kivity }
2833357a18adSDavid Woodhouse EXPORT_SYMBOL_GPL(gfn_to_page_many_atomic);
283491724814SBoris Ostrovsky 
283591724814SBoris Ostrovsky /*
283691724814SBoris Ostrovsky  * Do not use this helper unless you are absolutely certain the gfn _must_ be
283791724814SBoris Ostrovsky  * backed by 'struct page'.  A valid example is if the backing memslot is
283891724814SBoris Ostrovsky  * controlled by KVM.  Note, if the returned page is valid, its refcount has
283991724814SBoris Ostrovsky  * been elevated by gfn_to_pfn().
284091724814SBoris Ostrovsky  */
2841357a18adSDavid Woodhouse struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn)
2842e45adf66SKarimAllah Ahmed {
2843e45adf66SKarimAllah Ahmed 	struct page *page;
2844e45adf66SKarimAllah Ahmed 	kvm_pfn_t pfn;
2845e45adf66SKarimAllah Ahmed 
2846e45adf66SKarimAllah Ahmed 	pfn = gfn_to_pfn(kvm, gfn);
2847e45adf66SKarimAllah Ahmed 
2848e45adf66SKarimAllah Ahmed 	if (is_error_noslot_pfn(pfn))
2849e45adf66SKarimAllah Ahmed 		return KVM_ERR_PTR_BAD_PAGE;
2850357a18adSDavid Woodhouse 
2851e45adf66SKarimAllah Ahmed 	page = kvm_pfn_to_refcounted_page(pfn);
2852e45adf66SKarimAllah Ahmed 	if (!page)
2853e45adf66SKarimAllah Ahmed 		return KVM_ERR_PTR_BAD_PAGE;
2854e45adf66SKarimAllah Ahmed 
2855e45adf66SKarimAllah Ahmed 	return page;
2856e45adf66SKarimAllah Ahmed }
2857d30b214dSPaolo Bonzini EXPORT_SYMBOL_GPL(gfn_to_page);
285891724814SBoris Ostrovsky 
2859357a18adSDavid Woodhouse void kvm_release_pfn(kvm_pfn_t pfn, bool dirty)
2860d30b214dSPaolo Bonzini {
2861e45adf66SKarimAllah Ahmed 	if (dirty)
2862e45adf66SKarimAllah Ahmed 		kvm_release_pfn_dirty(pfn);
2863e45adf66SKarimAllah Ahmed 	else
2864e45adf66SKarimAllah Ahmed 		kvm_release_pfn_clean(pfn);
2865e45adf66SKarimAllah Ahmed }
2866e45adf66SKarimAllah Ahmed 
2867e45adf66SKarimAllah Ahmed int kvm_vcpu_map(struct kvm_vcpu *vcpu, gfn_t gfn, struct kvm_host_map *map)
2868e45adf66SKarimAllah Ahmed {
2869e45adf66SKarimAllah Ahmed 	kvm_pfn_t pfn;
2870e45adf66SKarimAllah Ahmed 	void *hva = NULL;
2871e45adf66SKarimAllah Ahmed 	struct page *page = KVM_UNMAPPED_PAGE;
2872e45adf66SKarimAllah Ahmed 
2873e45adf66SKarimAllah Ahmed 	if (!map)
2874e45adf66SKarimAllah Ahmed 		return -EINVAL;
2875357a18adSDavid Woodhouse 
2876e45adf66SKarimAllah Ahmed 	pfn = gfn_to_pfn(vcpu->kvm, gfn);
2877e45adf66SKarimAllah Ahmed 	if (is_error_noslot_pfn(pfn))
2878e45adf66SKarimAllah Ahmed 		return -EINVAL;
2879e45adf66SKarimAllah Ahmed 
2880e45adf66SKarimAllah Ahmed 	if (pfn_valid(pfn)) {
2881e45adf66SKarimAllah Ahmed 		page = pfn_to_page(pfn);
2882e45adf66SKarimAllah Ahmed 		hva = kmap(page);
2883357a18adSDavid Woodhouse #ifdef CONFIG_HAS_IOMEM
288491724814SBoris Ostrovsky 	} else {
288591724814SBoris Ostrovsky 		hva = memremap(pfn_to_hpa(pfn), PAGE_SIZE, MEMREMAP_WB);
288691724814SBoris Ostrovsky #endif
2887357a18adSDavid Woodhouse 	}
2888eb1f2f38SChristian Borntraeger 
2889e45adf66SKarimAllah Ahmed 	if (!hva)
289091724814SBoris Ostrovsky 		return -EFAULT;
2891357a18adSDavid Woodhouse 
289291724814SBoris Ostrovsky 	map->page = page;
2893357a18adSDavid Woodhouse 	map->hva = hva;
2894e45adf66SKarimAllah Ahmed 	map->pfn = pfn;
2895e45adf66SKarimAllah Ahmed 	map->gfn = gfn;
2896e45adf66SKarimAllah Ahmed 
2897e45adf66SKarimAllah Ahmed 	return 0;
2898e45adf66SKarimAllah Ahmed }
2899e45adf66SKarimAllah Ahmed EXPORT_SYMBOL_GPL(kvm_vcpu_map);
29008e1c6914SSean Christopherson 
29018e73485cSPaolo Bonzini void kvm_vcpu_unmap(struct kvm_vcpu *vcpu, struct kvm_host_map *map, bool dirty)
29028e1c6914SSean Christopherson {
29038e1c6914SSean Christopherson 	if (!map)
29048e1c6914SSean Christopherson 		return;
29058e1c6914SSean Christopherson 
29068e1c6914SSean Christopherson 	if (!map->hva)
29078e73485cSPaolo Bonzini 		return;
29088e1c6914SSean Christopherson 
29098e1c6914SSean Christopherson 	if (map->page != KVM_UNMAPPED_PAGE)
29108e1c6914SSean Christopherson 		kunmap(map->page);
29118e1c6914SSean Christopherson #ifdef CONFIG_HAS_IOMEM
29128e1c6914SSean Christopherson 	else
29138e1c6914SSean Christopherson 		memunmap(map->hva);
29148e1c6914SSean Christopherson #endif
29158e1c6914SSean Christopherson 
29168e1c6914SSean Christopherson 	if (dirty)
29178e1c6914SSean Christopherson 		kvm_vcpu_mark_page_dirty(vcpu, map->gfn);
29188e1c6914SSean Christopherson 
29198e1c6914SSean Christopherson 	kvm_release_pfn(map->pfn, dirty);
29208e73485cSPaolo Bonzini 
29210fce5623SAvi Kivity 	map->hva = NULL;
29220fce5623SAvi Kivity 	map->page = NULL;
292332cad84fSXiao Guangrong }
292432cad84fSXiao Guangrong EXPORT_SYMBOL_GPL(kvm_vcpu_unmap);
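/*
 * A minimal usage sketch for the map/unmap pair above; the gfn and the byte
 * written are hypothetical:
 *
 *	struct kvm_host_map map;
 *
 *	if (!kvm_vcpu_map(vcpu, gfn, &map)) {
 *		((u8 *)map.hva)[0] = 0xff;
 *		kvm_vcpu_unmap(vcpu, &map, true);
 *	}
 *
 * kvm_vcpu_map() returns 0 on success, and passing dirty == true to
 * kvm_vcpu_unmap() marks the gfn dirty before releasing the pfn.
 */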
29258e1c6914SSean Christopherson 
29268e1c6914SSean Christopherson static bool kvm_is_ad_tracked_page(struct page *page)
29270fce5623SAvi Kivity {
29280fce5623SAvi Kivity 	/*
29290fce5623SAvi Kivity 	 * Per page-flags.h, pages tagged PG_reserved "should in general not be
2930ba049e93SDan Williams 	 * touched (e.g. set dirty) except by its owner".
293135149e21SAnthony Liguori 	 */
2932b14b2690SSean Christopherson 	return !PageReserved(page);
2933b14b2690SSean Christopherson }
2934b14b2690SSean Christopherson 
2935b14b2690SSean Christopherson static void kvm_set_page_dirty(struct page *page)
2936b14b2690SSean Christopherson {
2937b14b2690SSean Christopherson 	if (kvm_is_ad_tracked_page(page))
2938b14b2690SSean Christopherson 		SetPageDirty(page);
2939b14b2690SSean Christopherson }
2940b14b2690SSean Christopherson 
2941b14b2690SSean Christopherson static void kvm_set_page_accessed(struct page *page)
294235149e21SAnthony Liguori {
294335149e21SAnthony Liguori 	if (kvm_is_ad_tracked_page(page))
294435149e21SAnthony Liguori 		mark_page_accessed(page);
29450fce5623SAvi Kivity }
29460fce5623SAvi Kivity 
2947a2766325SXiao Guangrong void kvm_release_page_clean(struct page *page)
2948a2766325SXiao Guangrong {
29498e1c6914SSean Christopherson 	WARN_ON(is_error_page(page));
29508e1c6914SSean Christopherson 
29510fce5623SAvi Kivity 	kvm_set_page_accessed(page);
29520fce5623SAvi Kivity 	put_page(page);
29530fce5623SAvi Kivity }
2954f7a6509fSDavid Hildenbrand EXPORT_SYMBOL_GPL(kvm_release_page_clean);
295535149e21SAnthony Liguori 
2956b14b2690SSean Christopherson void kvm_release_pfn_clean(kvm_pfn_t pfn)
2957b14b2690SSean Christopherson {
2958b14b2690SSean Christopherson 	struct page *page;
2959b14b2690SSean Christopherson 
2960b14b2690SSean Christopherson 	if (is_error_noslot_pfn(pfn))
2961b14b2690SSean Christopherson 		return;
2962b14b2690SSean Christopherson 
2963b14b2690SSean Christopherson 	page = kvm_pfn_to_refcounted_page(pfn);
2964b14b2690SSean Christopherson 	if (!page)
2965b14b2690SSean Christopherson 		return;
296635149e21SAnthony Liguori 
2967f7a6509fSDavid Hildenbrand 	kvm_release_page_clean(page);
296835149e21SAnthony Liguori }
2969a1040b0dSSean Christopherson EXPORT_SYMBOL_GPL(kvm_release_pfn_clean);
29708e1c6914SSean Christopherson 
29718e1c6914SSean Christopherson void kvm_release_page_dirty(struct page *page)
29728e1c6914SSean Christopherson {
2973a1040b0dSSean Christopherson 	WARN_ON(is_error_page(page));
2974ba049e93SDan Williams 
297535149e21SAnthony Liguori 	kvm_set_page_dirty(page);
29768e1c6914SSean Christopherson 	kvm_release_page_clean(page);
29778e1c6914SSean Christopherson }
29788e1c6914SSean Christopherson EXPORT_SYMBOL_GPL(kvm_release_page_dirty);
29798e1c6914SSean Christopherson 
29808e1c6914SSean Christopherson void kvm_release_pfn_dirty(kvm_pfn_t pfn)
29812e2e3738SAnthony Liguori {
298235149e21SAnthony Liguori 	struct page *page;
298335149e21SAnthony Liguori 
2984ba049e93SDan Williams 	if (is_error_noslot_pfn(pfn))
298535149e21SAnthony Liguori 		return;
29868e1c6914SSean Christopherson 
29878e1c6914SSean Christopherson 	page = kvm_pfn_to_refcounted_page(pfn);
29888e1c6914SSean Christopherson 	if (!page)
29898e1c6914SSean Christopherson 		return;
29908e1c6914SSean Christopherson 
299135149e21SAnthony Liguori 	kvm_release_page_dirty(page);
299235149e21SAnthony Liguori }
299335149e21SAnthony Liguori EXPORT_SYMBOL_GPL(kvm_release_pfn_dirty);
29940fce5623SAvi Kivity 
29950fce5623SAvi Kivity /*
29960fce5623SAvi Kivity  * Note, checking for an error/noslot pfn is the caller's responsibility when
29970fce5623SAvi Kivity  * directly marking a page dirty/accessed.  Unlike the "release" helpers, the
29980fce5623SAvi Kivity  * "set" helpers are not to be used when the pfn might point at garbage.
29990fce5623SAvi Kivity  */
30000fce5623SAvi Kivity void kvm_set_pfn_dirty(kvm_pfn_t pfn)
30010fce5623SAvi Kivity {
30028e73485cSPaolo Bonzini 	if (WARN_ON(is_error_noslot_pfn(pfn)))
30038e73485cSPaolo Bonzini 		return;
30040fce5623SAvi Kivity 
30050fce5623SAvi Kivity 	if (pfn_valid(pfn))
30060fce5623SAvi Kivity 		kvm_set_page_dirty(pfn_to_page(pfn));
30070fce5623SAvi Kivity }
30088e73485cSPaolo Bonzini EXPORT_SYMBOL_GPL(kvm_set_pfn_dirty);
30090fce5623SAvi Kivity 
30100fce5623SAvi Kivity void kvm_set_pfn_accessed(kvm_pfn_t pfn)
30113180a7fcSPaolo Bonzini {
30120fce5623SAvi Kivity 	if (WARN_ON(is_error_noslot_pfn(pfn)))
30130fce5623SAvi Kivity 		return;
30140fce5623SAvi Kivity 
30150fce5623SAvi Kivity 	if (pfn_valid(pfn))
30168e73485cSPaolo Bonzini 		kvm_set_page_accessed(pfn_to_page(pfn));
30178e73485cSPaolo Bonzini }
30188e73485cSPaolo Bonzini EXPORT_SYMBOL_GPL(kvm_set_pfn_accessed);
30198e73485cSPaolo Bonzini 
30208e73485cSPaolo Bonzini static int next_segment(unsigned long len, int offset)
30218e73485cSPaolo Bonzini {
30228e73485cSPaolo Bonzini 	if (len > PAGE_SIZE - offset)
30238e73485cSPaolo Bonzini 		return PAGE_SIZE - offset;
30240fce5623SAvi Kivity 	else
30250fce5623SAvi Kivity 		return len;
30268e73485cSPaolo Bonzini }
30278e73485cSPaolo Bonzini 
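/*
 * next_segment() caps a guest access at the end of the current page.  With
 * 4KiB pages, for example, a 0x300-byte access starting at page offset 0xf00
 * is split into a 0x100-byte segment up to the page boundary followed by a
 * 0x200-byte segment at offset 0 of the next gfn; the read/write loops below
 * rely on this to walk multi-page ranges.
 */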
30288e73485cSPaolo Bonzini static int __kvm_read_guest_page(struct kvm_memory_slot *slot, gfn_t gfn,
30298e73485cSPaolo Bonzini 				 void *data, int offset, int len)
30308e73485cSPaolo Bonzini {
30318e73485cSPaolo Bonzini 	int r;
30328e73485cSPaolo Bonzini 	unsigned long addr;
30338e73485cSPaolo Bonzini 
30348e73485cSPaolo Bonzini 	addr = gfn_to_hva_memslot_prot(slot, gfn, NULL);
30350fce5623SAvi Kivity 	if (kvm_is_error_hva(addr))
30360fce5623SAvi Kivity 		return -EFAULT;
30370fce5623SAvi Kivity 	r = __copy_from_user(data, (void __user *)addr + offset, len);
30380fce5623SAvi Kivity 	if (r)
30390fce5623SAvi Kivity 		return -EFAULT;
30400fce5623SAvi Kivity 	return 0;
30410fce5623SAvi Kivity }
30420fce5623SAvi Kivity 
30430fce5623SAvi Kivity int kvm_read_guest_page(struct kvm *kvm, gfn_t gfn, void *data, int offset,
30440fce5623SAvi Kivity 			int len)
30450fce5623SAvi Kivity {
30460fce5623SAvi Kivity 	struct kvm_memory_slot *slot = gfn_to_memslot(kvm, gfn);
30470fce5623SAvi Kivity 
30480fce5623SAvi Kivity 	return __kvm_read_guest_page(slot, gfn, data, offset, len);
30490fce5623SAvi Kivity }
30500fce5623SAvi Kivity EXPORT_SYMBOL_GPL(kvm_read_guest_page);
30510fce5623SAvi Kivity 
30520fce5623SAvi Kivity int kvm_vcpu_read_guest_page(struct kvm_vcpu *vcpu, gfn_t gfn, void *data,
30530fce5623SAvi Kivity 			     int offset, int len)
30540fce5623SAvi Kivity {
30558e73485cSPaolo Bonzini 	struct kvm_memory_slot *slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
30568e73485cSPaolo Bonzini 
30578e73485cSPaolo Bonzini 	return __kvm_read_guest_page(slot, gfn, data, offset, len);
30588e73485cSPaolo Bonzini }
30598e73485cSPaolo Bonzini EXPORT_SYMBOL_GPL(kvm_vcpu_read_guest_page);
30608e73485cSPaolo Bonzini 
30618e73485cSPaolo Bonzini int kvm_read_guest(struct kvm *kvm, gpa_t gpa, void *data, unsigned long len)
30628e73485cSPaolo Bonzini {
30638e73485cSPaolo Bonzini 	gfn_t gfn = gpa >> PAGE_SHIFT;
30648e73485cSPaolo Bonzini 	int seg;
30658e73485cSPaolo Bonzini 	int offset = offset_in_page(gpa);
30668e73485cSPaolo Bonzini 	int ret;
30678e73485cSPaolo Bonzini 
30688e73485cSPaolo Bonzini 	while ((seg = next_segment(len, offset)) != 0) {
30698e73485cSPaolo Bonzini 		ret = kvm_read_guest_page(kvm, gfn, data, offset, seg);
30708e73485cSPaolo Bonzini 		if (ret < 0)
30718e73485cSPaolo Bonzini 			return ret;
30728e73485cSPaolo Bonzini 		offset = 0;
30738e73485cSPaolo Bonzini 		len -= seg;
30748e73485cSPaolo Bonzini 		data += seg;
30758e73485cSPaolo Bonzini 		++gfn;
30768e73485cSPaolo Bonzini 	}
30777ec54588SMarcelo Tosatti 	return 0;
30787ec54588SMarcelo Tosatti }
30797ec54588SMarcelo Tosatti EXPORT_SYMBOL_GPL(kvm_read_guest);
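/*
 * A sketch of a typical caller copying a guest-physical object into the
 * host; 'gpa' and 'struct foo' are hypothetical:
 *
 *	struct foo val;
 *
 *	if (kvm_read_guest(kvm, gpa, &val, sizeof(val)))
 *		return -EFAULT;
 *
 * The copy may span multiple pages and memslots; -EFAULT is returned if any
 * segment cannot be copied.
 */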
30807ec54588SMarcelo Tosatti 
30818e73485cSPaolo Bonzini int kvm_vcpu_read_guest(struct kvm_vcpu *vcpu, gpa_t gpa, void *data, unsigned long len)
30827ec54588SMarcelo Tosatti {
30837ec54588SMarcelo Tosatti 	gfn_t gfn = gpa >> PAGE_SHIFT;
30840aac03f0SAndrea Arcangeli 	int seg;
30853180a7fcSPaolo Bonzini 	int offset = offset_in_page(gpa);
30860aac03f0SAndrea Arcangeli 	int ret;
30877ec54588SMarcelo Tosatti 
30887ec54588SMarcelo Tosatti 	while ((seg = next_segment(len, offset)) != 0) {
30897ec54588SMarcelo Tosatti 		ret = kvm_vcpu_read_guest_page(vcpu, gfn, data, offset, seg);
30907ec54588SMarcelo Tosatti 		if (ret < 0)
30917ec54588SMarcelo Tosatti 			return ret;
30928e73485cSPaolo Bonzini 		offset = 0;
30938e73485cSPaolo Bonzini 		len -= seg;
30948e73485cSPaolo Bonzini 		data += seg;
30958e73485cSPaolo Bonzini 		++gfn;
30968e73485cSPaolo Bonzini 	}
30978e73485cSPaolo Bonzini 	return 0;
30988e73485cSPaolo Bonzini }
30998e73485cSPaolo Bonzini EXPORT_SYMBOL_GPL(kvm_vcpu_read_guest);
31008e73485cSPaolo Bonzini 
31018e73485cSPaolo Bonzini static int __kvm_read_guest_atomic(struct kvm_memory_slot *slot, gfn_t gfn,
31028e73485cSPaolo Bonzini 			           void *data, int offset, unsigned long len)
310328bd726aSPeter Xu {
310428bd726aSPeter Xu 	int r;
31058e73485cSPaolo Bonzini 	unsigned long addr;
31060fce5623SAvi Kivity 
31070fce5623SAvi Kivity 	addr = gfn_to_hva_memslot_prot(slot, gfn, NULL);
31080fce5623SAvi Kivity 	if (kvm_is_error_hva(addr))
31090fce5623SAvi Kivity 		return -EFAULT;
3110251eb841SRadim Krčmář 	pagefault_disable();
31110fce5623SAvi Kivity 	r = __copy_from_user_inatomic(data, (void __user *)addr + offset, len);
31120fce5623SAvi Kivity 	pagefault_enable();
31138b0cedffSXiao Guangrong 	if (r)
31140fce5623SAvi Kivity 		return -EFAULT;
31150fce5623SAvi Kivity 	return 0;
311628bd726aSPeter Xu }
31170fce5623SAvi Kivity 
31180fce5623SAvi Kivity int kvm_vcpu_read_guest_atomic(struct kvm_vcpu *vcpu, gpa_t gpa,
31198e73485cSPaolo Bonzini 			       void *data, unsigned long len)
31208e73485cSPaolo Bonzini {
31218e73485cSPaolo Bonzini 	gfn_t gfn = gpa >> PAGE_SHIFT;
31228e73485cSPaolo Bonzini 	struct kvm_memory_slot *slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
31238e73485cSPaolo Bonzini 	int offset = offset_in_page(gpa);
31248e73485cSPaolo Bonzini 
312528bd726aSPeter Xu 	return __kvm_read_guest_atomic(slot, gfn, data, offset, len);
31268e73485cSPaolo Bonzini }
31270fce5623SAvi Kivity EXPORT_SYMBOL_GPL(kvm_vcpu_read_guest_atomic);
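/*
 * The atomic variant above differs from kvm_vcpu_read_guest() only in that
 * the copy is wrapped in pagefault_disable(), so it can be used in contexts
 * that must not sleep; if the guest page is not already mapped in the host
 * it fails with -EFAULT instead of faulting the page in.
 */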
31280fce5623SAvi Kivity 
31298e73485cSPaolo Bonzini static int __kvm_write_guest_page(struct kvm *kvm,
31308e73485cSPaolo Bonzini 				  struct kvm_memory_slot *memslot, gfn_t gfn,
31318e73485cSPaolo Bonzini 			          const void *data, int offset, int len)
31328e73485cSPaolo Bonzini {
31338e73485cSPaolo Bonzini 	int r;
313428bd726aSPeter Xu 	unsigned long addr;
31358e73485cSPaolo Bonzini 
31368e73485cSPaolo Bonzini 	addr = gfn_to_hva_memslot(memslot, gfn);
31378e73485cSPaolo Bonzini 	if (kvm_is_error_hva(addr))
31380fce5623SAvi Kivity 		return -EFAULT;
31390fce5623SAvi Kivity 	r = __copy_to_user((void __user *)addr + offset, data, len);
31400fce5623SAvi Kivity 	if (r)
31410fce5623SAvi Kivity 		return -EFAULT;
31420fce5623SAvi Kivity 	mark_page_dirty_in_slot(kvm, memslot, gfn);
31430fce5623SAvi Kivity 	return 0;
31440fce5623SAvi Kivity }
31450fce5623SAvi Kivity 
31460fce5623SAvi Kivity int kvm_write_guest_page(struct kvm *kvm, gfn_t gfn,
31470fce5623SAvi Kivity 			 const void *data, int offset, int len)
31480fce5623SAvi Kivity {
31490fce5623SAvi Kivity 	struct kvm_memory_slot *slot = gfn_to_memslot(kvm, gfn);
31500fce5623SAvi Kivity 
31510fce5623SAvi Kivity 	return __kvm_write_guest_page(kvm, slot, gfn, data, offset, len);
31520fce5623SAvi Kivity }
31530fce5623SAvi Kivity EXPORT_SYMBOL_GPL(kvm_write_guest_page);
31540fce5623SAvi Kivity 
31550fce5623SAvi Kivity int kvm_vcpu_write_guest_page(struct kvm_vcpu *vcpu, gfn_t gfn,
31560fce5623SAvi Kivity 			      const void *data, int offset, int len)
3157ff651cb6SWincy Van {
31580fce5623SAvi Kivity 	struct kvm_memory_slot *slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
31598e73485cSPaolo Bonzini 
31608e73485cSPaolo Bonzini 	return __kvm_write_guest_page(vcpu->kvm, slot, gfn, data, offset, len);
31618e73485cSPaolo Bonzini }
31628e73485cSPaolo Bonzini EXPORT_SYMBOL_GPL(kvm_vcpu_write_guest_page);
31638e73485cSPaolo Bonzini 
31648e73485cSPaolo Bonzini int kvm_write_guest(struct kvm *kvm, gpa_t gpa, const void *data,
31658e73485cSPaolo Bonzini 		    unsigned long len)
31668e73485cSPaolo Bonzini {
31678e73485cSPaolo Bonzini 	gfn_t gfn = gpa >> PAGE_SHIFT;
31688e73485cSPaolo Bonzini 	int seg;
31698e73485cSPaolo Bonzini 	int offset = offset_in_page(gpa);
31708e73485cSPaolo Bonzini 	int ret;
31718e73485cSPaolo Bonzini 
31728e73485cSPaolo Bonzini 	while ((seg = next_segment(len, offset)) != 0) {
31738e73485cSPaolo Bonzini 		ret = kvm_write_guest_page(kvm, gfn, data, offset, seg);
31748e73485cSPaolo Bonzini 		if (ret < 0)
31758e73485cSPaolo Bonzini 			return ret;
31768e73485cSPaolo Bonzini 		offset = 0;
31778e73485cSPaolo Bonzini 		len -= seg;
31788e73485cSPaolo Bonzini 		data += seg;
31798e73485cSPaolo Bonzini 		++gfn;
31805a2d4365SPaolo Bonzini 	}
31815a2d4365SPaolo Bonzini 	return 0;
31828f964525SAndrew Honig }
318349c7754cSGleb Natapov EXPORT_SYMBOL_GPL(kvm_write_guest);
318449c7754cSGleb Natapov 
31858f964525SAndrew Honig int kvm_vcpu_write_guest(struct kvm_vcpu *vcpu, gpa_t gpa, const void *data,
31868f964525SAndrew Honig 		         unsigned long len)
31878f964525SAndrew Honig {
31888f964525SAndrew Honig 	gfn_t gfn = gpa >> PAGE_SHIFT;
318949c7754cSGleb Natapov 	int seg;
31906ad1e29fSSean Christopherson 	int offset = offset_in_page(gpa);
319149c7754cSGleb Natapov 	int ret;
31926ad1e29fSSean Christopherson 
31936ad1e29fSSean Christopherson 	while ((seg = next_segment(len, offset)) != 0) {
3194f1b9dd5eSJim Mattson 		ret = kvm_vcpu_write_guest_page(vcpu, gfn, data, offset, seg);
31956ad1e29fSSean Christopherson 		if (ret < 0)
31966ad1e29fSSean Christopherson 			return ret;
3197f1b9dd5eSJim Mattson 		offset = 0;
31988f964525SAndrew Honig 		len -= seg;
31998f964525SAndrew Honig 		data += seg;
32008f964525SAndrew Honig 		++gfn;
32018f964525SAndrew Honig 	}
32026ad1e29fSSean Christopherson 	return 0;
32035a2d4365SPaolo Bonzini }
32048f964525SAndrew Honig EXPORT_SYMBOL_GPL(kvm_vcpu_write_guest);
32058f964525SAndrew Honig 
32068f964525SAndrew Honig static int __kvm_gfn_to_hva_cache_init(struct kvm_memslots *slots,
32076ad1e29fSSean Christopherson 				       struct gfn_to_hva_cache *ghc,
32088f964525SAndrew Honig 				       gpa_t gpa, unsigned long len)
3209f1b9dd5eSJim Mattson {
32108f964525SAndrew Honig 	int offset = offset_in_page(gpa);
32116ad1e29fSSean Christopherson 	gfn_t start_gfn = gpa >> PAGE_SHIFT;
3212f1b9dd5eSJim Mattson 	gfn_t end_gfn = (gpa + len - 1) >> PAGE_SHIFT;
3213f1b9dd5eSJim Mattson 	gfn_t nr_pages_needed = end_gfn - start_gfn + 1;
32148f964525SAndrew Honig 	gfn_t nr_pages_avail;
3215f1b9dd5eSJim Mattson 
32166ad1e29fSSean Christopherson 	/* Update ghc->generation before performing any error checks. */
32176ad1e29fSSean Christopherson 	ghc->generation = slots->generation;
32186ad1e29fSSean Christopherson 
321949c7754cSGleb Natapov 	if (start_gfn > end_gfn) {
32205a2d4365SPaolo Bonzini 		ghc->hva = KVM_HVA_ERR_BAD;
32214e335d9eSPaolo Bonzini 		return -EINVAL;
32225a2d4365SPaolo Bonzini 	}
32235a2d4365SPaolo Bonzini 
32244e335d9eSPaolo Bonzini 	/*
32255a2d4365SPaolo Bonzini 	 * If the requested region crosses two memslots, we still
32265a2d4365SPaolo Bonzini 	 * verify that the entire region is valid here.
32274e335d9eSPaolo Bonzini 	 */
322849c7754cSGleb Natapov 	for ( ; start_gfn <= end_gfn; start_gfn += nr_pages_avail) {
32294e335d9eSPaolo Bonzini 		ghc->memslot = __gfn_to_memslot(slots, start_gfn);
32307a86dab8SJim Mattson 		ghc->hva = gfn_to_hva_many(ghc->memslot, start_gfn,
32317a86dab8SJim Mattson 					   &nr_pages_avail);
323249c7754cSGleb Natapov 		if (kvm_is_error_hva(ghc->hva))
32334e335d9eSPaolo Bonzini 			return -EFAULT;
323449c7754cSGleb Natapov 	}
32354ec6e863SPan Xinhui 
323649c7754cSGleb Natapov 	/* Use the slow path for cross page reads and writes. */
32375f25e71eSPaolo Bonzini 	if (nr_pages_needed == 1)
32385f25e71eSPaolo Bonzini 		ghc->hva += offset;
32398f964525SAndrew Honig 	else
3240dc9ce71eSSean Christopherson 		ghc->memslot = NULL;
3241dc9ce71eSSean Christopherson 
3242dc9ce71eSSean Christopherson 	ghc->gpa = gpa;
3243dc9ce71eSSean Christopherson 	ghc->len = len;
32448f964525SAndrew Honig 	return 0;
324549c7754cSGleb Natapov }
324649c7754cSGleb Natapov 
324749c7754cSGleb Natapov int kvm_gfn_to_hva_cache_init(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
3248fcfbc617SSean Christopherson 			      gpa_t gpa, unsigned long len)
3249fcfbc617SSean Christopherson {
3250fcfbc617SSean Christopherson 	struct kvm_memslots *slots = kvm_memslots(kvm);
32514ec6e863SPan Xinhui 	return __kvm_gfn_to_hva_cache_init(slots, ghc, gpa, len);
325249c7754cSGleb Natapov }
325349c7754cSGleb Natapov EXPORT_SYMBOL_GPL(kvm_gfn_to_hva_cache_init);
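/*
 * gfn_to_hva_cache is intended for guest addresses that are accessed
 * repeatedly, e.g. a shared page updated on every exit.  A sketch of the
 * usual pattern, with a hypothetical gpa and payload:
 *
 *	struct gfn_to_hva_cache ghc;
 *	u64 val = 0;
 *
 *	kvm_gfn_to_hva_cache_init(kvm, &ghc, gpa, sizeof(val));
 *	...
 *	kvm_write_guest_cached(kvm, &ghc, &val, sizeof(val));
 *
 * The *_cached() helpers below revalidate the cache when the memslot
 * generation changes and fall back to the uncached path when the region
 * spans more than one page.
 */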
325428bd726aSPeter Xu 
325549c7754cSGleb Natapov int kvm_write_guest_offset_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
325649c7754cSGleb Natapov 				  void *data, unsigned int offset,
325749c7754cSGleb Natapov 				  unsigned long len)
32584e335d9eSPaolo Bonzini {
32594ec6e863SPan Xinhui 	struct kvm_memslots *slots = kvm_memslots(kvm);
32604e335d9eSPaolo Bonzini 	int r;
32614ec6e863SPan Xinhui 	gpa_t gpa = ghc->gpa + offset;
32624ec6e863SPan Xinhui 
32634e335d9eSPaolo Bonzini 	if (WARN_ON_ONCE(len + offset > ghc->len))
32644ec6e863SPan Xinhui 		return -EINVAL;
32654e335d9eSPaolo Bonzini 
326649c7754cSGleb Natapov 	if (slots->generation != ghc->generation) {
32670958f0ceSVitaly Kuznetsov 		if (__kvm_gfn_to_hva_cache_init(slots, ghc, ghc->gpa, ghc->len))
32680958f0ceSVitaly Kuznetsov 			return -EFAULT;
32690958f0ceSVitaly Kuznetsov 	}
3270e03b644fSGleb Natapov 
32714e335d9eSPaolo Bonzini 	if (kvm_is_error_hva(ghc->hva))
3272e03b644fSGleb Natapov 		return -EFAULT;
32730958f0ceSVitaly Kuznetsov 
3274e03b644fSGleb Natapov 	if (unlikely(!ghc->memslot))
32755f25e71eSPaolo Bonzini 		return kvm_write_guest(kvm, gpa, data, len);
32765f25e71eSPaolo Bonzini 
32778f964525SAndrew Honig 	r = __copy_to_user((void __user *)ghc->hva + offset, data, len);
3278dc9ce71eSSean Christopherson 	if (r)
3279dc9ce71eSSean Christopherson 		return -EFAULT;
3280dc9ce71eSSean Christopherson 	mark_page_dirty_in_slot(kvm, ghc->memslot, gpa >> PAGE_SHIFT);
3281dc9ce71eSSean Christopherson 
32828f964525SAndrew Honig 	return 0;
3283e03b644fSGleb Natapov }
3284e03b644fSGleb Natapov EXPORT_SYMBOL_GPL(kvm_write_guest_offset_cached);
3285e03b644fSGleb Natapov 
3286fcfbc617SSean Christopherson int kvm_write_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
32870958f0ceSVitaly Kuznetsov 			   void *data, unsigned long len)
3288fcfbc617SSean Christopherson {
32890958f0ceSVitaly Kuznetsov 	return kvm_write_guest_offset_cached(kvm, ghc, data, 0, len);
3290e03b644fSGleb Natapov }
3291e03b644fSGleb Natapov EXPORT_SYMBOL_GPL(kvm_write_guest_cached);
3292e03b644fSGleb Natapov 
3293e03b644fSGleb Natapov int kvm_read_guest_offset_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
3294e03b644fSGleb Natapov 				 void *data, unsigned int offset,
32950958f0ceSVitaly Kuznetsov 				 unsigned long len)
32960958f0ceSVitaly Kuznetsov {
32970958f0ceSVitaly Kuznetsov 	struct kvm_memslots *slots = kvm_memslots(kvm);
32980958f0ceSVitaly Kuznetsov 	int r;
32990958f0ceSVitaly Kuznetsov 	gpa_t gpa = ghc->gpa + offset;
33000958f0ceSVitaly Kuznetsov 
33010958f0ceSVitaly Kuznetsov 	if (WARN_ON_ONCE(len + offset > ghc->len))
33024e335d9eSPaolo Bonzini 		return -EINVAL;
3303e03b644fSGleb Natapov 
33040fce5623SAvi Kivity 	if (slots->generation != ghc->generation) {
33050fce5623SAvi Kivity 		if (__kvm_gfn_to_hva_cache_init(slots, ghc, ghc->gpa, ghc->len))
33062f541442SPaolo Bonzini 			return -EFAULT;
33070fce5623SAvi Kivity 	}
33080fce5623SAvi Kivity 
33090fce5623SAvi Kivity 	if (kvm_is_error_hva(ghc->hva))
33100fce5623SAvi Kivity 		return -EFAULT;
33110fce5623SAvi Kivity 
33120fce5623SAvi Kivity 	if (unlikely(!ghc->memslot))
33132f541442SPaolo Bonzini 		return kvm_read_guest(kvm, gpa, data, len);
33140fce5623SAvi Kivity 
33150fce5623SAvi Kivity 	r = __copy_from_user(data, (void __user *)ghc->hva + offset, len);
33160fce5623SAvi Kivity 	if (r)
33170fce5623SAvi Kivity 		return -EFAULT;
33180fce5623SAvi Kivity 
33190fce5623SAvi Kivity 	return 0;
33200fce5623SAvi Kivity }
33210fce5623SAvi Kivity EXPORT_SYMBOL_GPL(kvm_read_guest_offset_cached);
33220fce5623SAvi Kivity 
33230fce5623SAvi Kivity int kvm_read_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
332428bd726aSPeter Xu 			  void *data, unsigned long len)
33258283e36aSBen Gardon {
332628bd726aSPeter Xu 	return kvm_read_guest_offset_cached(kvm, ghc, data, 0, len);
33270fce5623SAvi Kivity }
33282efd61a6SDavid Woodhouse EXPORT_SYMBOL_GPL(kvm_read_guest_cached);
33292efd61a6SDavid Woodhouse 
3330e09fccb5SChristian Borntraeger int kvm_clear_guest(struct kvm *kvm, gpa_t gpa, unsigned long len)
333186bdf3ebSGavin Shan {
33322efd61a6SDavid Woodhouse 	const void *zero_page = (const void *) __va(page_to_phys(ZERO_PAGE(0)));
333386bdf3ebSGavin Shan 	gfn_t gfn = gpa >> PAGE_SHIFT;
3334c57351a7SGavin Shan 	int seg;
3335e09fccb5SChristian Borntraeger 	int offset = offset_in_page(gpa);
33362efd61a6SDavid Woodhouse 	int ret;
3337044c59c4SPeter Xu 
33380fce5623SAvi Kivity 	while ((seg = next_segment(len, offset)) != 0) {
3339fb04a1edSPeter Xu 		ret = kvm_write_guest_page(kvm, gfn, zero_page, offset, seg);
33400fce5623SAvi Kivity 		if (ret < 0)
334186bdf3ebSGavin Shan 			return ret;
3342cf87ac73SGavin Shan 		offset = 0;
3343c57351a7SGavin Shan 		len -= seg;
3344b74ca3b3STakuya Yoshikawa 		++gfn;
33450fce5623SAvi Kivity 	}
33460fce5623SAvi Kivity 	return 0;
3347a6a0b05dSBen Gardon }
33480fce5623SAvi Kivity EXPORT_SYMBOL_GPL(kvm_clear_guest);
334949c7754cSGleb Natapov 
335049c7754cSGleb Natapov void mark_page_dirty_in_slot(struct kvm *kvm,
335149c7754cSGleb Natapov 			     const struct kvm_memory_slot *memslot,
335249c7754cSGleb Natapov 		 	     gfn_t gfn)
335349c7754cSGleb Natapov {
335428bd726aSPeter Xu 	struct kvm_vcpu *vcpu = kvm_get_running_vcpu();
335549c7754cSGleb Natapov 
33562ba9f0d8SAneesh Kumar K.V #ifdef CONFIG_HAVE_KVM_DIRTY_RING
335749c7754cSGleb Natapov 	if (WARN_ON_ONCE(vcpu && vcpu->kvm != kvm))
33588e73485cSPaolo Bonzini 		return;
33598e73485cSPaolo Bonzini 
33608e73485cSPaolo Bonzini 	WARN_ON_ONCE(!vcpu && !kvm_arch_allow_write_without_running_vcpu(kvm));
33618e73485cSPaolo Bonzini #endif
33628e73485cSPaolo Bonzini 
336328bd726aSPeter Xu 	if (memslot && kvm_slot_dirty_track_enabled(memslot)) {
33648e73485cSPaolo Bonzini 		unsigned long rel_gfn = gfn - memslot->base_gfn;
33658e73485cSPaolo Bonzini 		u32 slot = (memslot->as_id << 16) | memslot->id;
33668e73485cSPaolo Bonzini 
336720b7035cSJan H. Schönherr 		if (kvm->dirty_ring_size && vcpu)
336820b7035cSJan H. Schönherr 			kvm_dirty_ring_push(vcpu, slot, rel_gfn);
336920b7035cSJan H. Schönherr 		else if (memslot->dirty_bitmap)
337020b7035cSJan H. Schönherr 			set_bit_le(rel_gfn, memslot->dirty_bitmap);
337120b7035cSJan H. Schönherr 	}
337220b7035cSJan H. Schönherr }
337320b7035cSJan H. Schönherr EXPORT_SYMBOL_GPL(mark_page_dirty_in_slot);
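/*
 * Dirty tracking has two backends: when the dirty ring is enabled
 * (kvm->dirty_ring_size != 0) and a vCPU is running, the gfn is pushed onto
 * that vCPU's ring; otherwise it is recorded in the memslot's dirty bitmap.
 * The two wrappers below only resolve the memslot before deferring here.
 */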
337420b7035cSJan H. Schönherr 
337520b7035cSJan H. Schönherr void mark_page_dirty(struct kvm *kvm, gfn_t gfn)
337620b7035cSJan H. Schönherr {
337720b7035cSJan H. Schönherr 	struct kvm_memory_slot *memslot;
337820b7035cSJan H. Schönherr 
337920b7035cSJan H. Schönherr 	memslot = gfn_to_memslot(kvm, gfn);
338020b7035cSJan H. Schönherr 	mark_page_dirty_in_slot(kvm, memslot, gfn);
338120b7035cSJan H. Schönherr }
338220b7035cSJan H. Schönherr EXPORT_SYMBOL_GPL(mark_page_dirty);
338320b7035cSJan H. Schönherr 
338420b7035cSJan H. Schönherr void kvm_vcpu_mark_page_dirty(struct kvm_vcpu *vcpu, gfn_t gfn)
338520b7035cSJan H. Schönherr {
338620b7035cSJan H. Schönherr 	struct kvm_memory_slot *memslot;
338720b7035cSJan H. Schönherr 
338820b7035cSJan H. Schönherr 	memslot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
338920b7035cSJan H. Schönherr 	mark_page_dirty_in_slot(vcpu->kvm, memslot, gfn);
3390aca6ff29SWanpeng Li }
3391aca6ff29SWanpeng Li EXPORT_SYMBOL_GPL(kvm_vcpu_mark_page_dirty);
3392dee339b5SNir Weiner 
3393aca6ff29SWanpeng Li void kvm_sigset_activate(struct kvm_vcpu *vcpu)
33942cbd7824SWanpeng Li {
3395dee339b5SNir Weiner 	if (!vcpu->sigset_active)
33966b6de68cSChristian Borntraeger 		return;
33977fa08e71SNir Weiner 
33987fa08e71SNir Weiner 	/*
33997fa08e71SNir Weiner 	 * This does a lockless modification of ->real_blocked, which is fine
34006b6de68cSChristian Borntraeger 	 * because only current can change ->real_blocked, and all readers of
3401dee339b5SNir Weiner 	 * ->real_blocked don't care as long as ->real_blocked is always a subset
3402dee339b5SNir Weiner 	 * of ->blocked.
3403aca6ff29SWanpeng Li 	 */
3404aca6ff29SWanpeng Li 	sigprocmask(SIG_SETMASK, &vcpu->sigset, &current->real_blocked);
34057fa08e71SNir Weiner }
34062cbd7824SWanpeng Li 
3407aca6ff29SWanpeng Li void kvm_sigset_deactivate(struct kvm_vcpu *vcpu)
3408aca6ff29SWanpeng Li {
3409aca6ff29SWanpeng Li 	if (!vcpu->sigset_active)
3410aca6ff29SWanpeng Li 		return;
3411ae232ea4SSergey Senozhatsky 
3412aca6ff29SWanpeng Li 	sigprocmask(SIG_SETMASK, &current->real_blocked, NULL);
34132cbd7824SWanpeng Li 	sigemptyset(&current->real_blocked);
34146b6de68cSChristian Borntraeger }
3415ae232ea4SSergey Senozhatsky 
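/*
 * The two helpers above bracket guest entry when userspace has installed a
 * temporary signal mask for the vCPU (KVM_SET_SIGNAL_MASK): the vCPU's
 * sigset is applied on entry and the task's original blocked set is restored
 * on exit.
 */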
34166b6de68cSChristian Borntraeger static void grow_halt_poll_ns(struct kvm_vcpu *vcpu)
3417aca6ff29SWanpeng Li {
3418aca6ff29SWanpeng Li 	unsigned int old, val, grow, grow_start;
34196b6de68cSChristian Borntraeger 
3420aca6ff29SWanpeng Li 	old = val = vcpu->halt_poll_ns;
3421ae232ea4SSergey Senozhatsky 	grow_start = READ_ONCE(halt_poll_ns_grow_start);
3422ae232ea4SSergey Senozhatsky 	grow = READ_ONCE(halt_poll_ns_grow);
3423ae232ea4SSergey Senozhatsky 	if (!grow)
3424aca6ff29SWanpeng Li 		goto out;
34252cbd7824SWanpeng Li 
3426aca6ff29SWanpeng Li 	val *= grow;
3427aca6ff29SWanpeng Li 	if (val < grow_start)
3428f7819512SPaolo Bonzini 		val = grow_start;
3429f7819512SPaolo Bonzini 
343050c28f21SJunaid Shahid 	vcpu->halt_poll_ns = val;
343150c28f21SJunaid Shahid out:
343250c28f21SJunaid Shahid 	trace_kvm_halt_poll_ns_grow(vcpu->vcpu_id, val, old);
3433c59fb127SPaolo Bonzini }
343450c28f21SJunaid Shahid 
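/*
 * Numeric example of the grow policy above, assuming the default module
 * parameters (halt_poll_ns_grow == 2, halt_poll_ns_grow_start == 10000): a
 * polling window of 0 jumps straight to 10us, and each subsequent grow
 * doubles it (10us -> 20us -> 40us ...).
 */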
3435f7819512SPaolo Bonzini static void shrink_halt_poll_ns(struct kvm_vcpu *vcpu)
343650c28f21SJunaid Shahid {
3437f7819512SPaolo Bonzini 	unsigned int old, val, shrink, grow_start;
343850c28f21SJunaid Shahid 
3439084071d5SMarcelo Tosatti 	old = val = vcpu->halt_poll_ns;
3440084071d5SMarcelo Tosatti 	shrink = READ_ONCE(halt_poll_ns_shrink);
3441f7819512SPaolo Bonzini 	grow_start = READ_ONCE(halt_poll_ns_grow_start);
344250c28f21SJunaid Shahid 	if (shrink == 0)
344350c28f21SJunaid Shahid 		val = 0;
344450c28f21SJunaid Shahid 	else
344550c28f21SJunaid Shahid 		val /= shrink;
3446f7819512SPaolo Bonzini 
3447f7819512SPaolo Bonzini 	if (val < grow_start)
34480fce5623SAvi Kivity 		val = 0;
3449fac42688SSean Christopherson 
3450fac42688SSean Christopherson 	vcpu->halt_poll_ns = val;
3451fac42688SSean Christopherson 	trace_kvm_halt_poll_ns_shrink(vcpu->vcpu_id, val, old);
34520fce5623SAvi Kivity }
3453fac42688SSean Christopherson 
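/*
 * The shrink policy above resets the window to 0 outright when
 * halt_poll_ns_shrink is 0; otherwise the window is divided by the shrink
 * factor (e.g. 40us -> 20us with a factor of 2), and any result below
 * halt_poll_ns_grow_start is clamped to 0, disabling polling entirely.
 */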
34540fce5623SAvi Kivity static int kvm_vcpu_check_block(struct kvm_vcpu *vcpu)
3455fac42688SSean Christopherson {
3456f7819512SPaolo Bonzini 	int ret = -EINTR;
3457fac42688SSean Christopherson 	int idx = srcu_read_lock(&vcpu->kvm->srcu);
3458c3858335SJing Zhang 
3459f7819512SPaolo Bonzini 	if (kvm_arch_vcpu_runnable(vcpu))
346018869f26SMaxim Levitsky 		goto out;
346107ab0f8dSMarc Zyngier 	if (kvm_cpu_has_pending_timer(vcpu))
3462fac42688SSean Christopherson 		goto out;
346318869f26SMaxim Levitsky 	if (signal_pending(current))
346418869f26SMaxim Levitsky 		goto out;
3465e5c239cfSMarcelo Tosatti 	if (kvm_check_request(KVM_REQ_UNBLOCK, vcpu))
3466da4ad88cSDavidlohr Bueso 		goto out;
34670fce5623SAvi Kivity 
3468f7819512SPaolo Bonzini 	ret = 0;
3469e5c239cfSMarcelo Tosatti out:
3470e5c239cfSMarcelo Tosatti 	srcu_read_unlock(&vcpu->kvm->srcu, idx);
3471f7819512SPaolo Bonzini 	return ret;
34720fce5623SAvi Kivity }
34730fce5623SAvi Kivity 
3474fac42688SSean Christopherson /*
347518869f26SMaxim Levitsky  * Block the vCPU until the vCPU is runnable, an event arrives, or a signal is
347618869f26SMaxim Levitsky  * pending.  This is mostly used when halting a vCPU, but may also be used
3477fac42688SSean Christopherson  * directly for other vCPU non-runnable states, e.g. x86's Wait-For-SIPI.
347818869f26SMaxim Levitsky  */
3479fac42688SSean Christopherson bool kvm_vcpu_block(struct kvm_vcpu *vcpu)
3480c3858335SJing Zhang {
3481c3858335SJing Zhang 	struct rcuwait *wait = kvm_arch_vcpu_get_wait(vcpu);
3482fac42688SSean Christopherson 	bool waited = false;
3483fac42688SSean Christopherson 
3484fac42688SSean Christopherson 	vcpu->stat.generic.blocking = 1;
348529e72893SSean Christopherson 
348629e72893SSean Christopherson 	preempt_disable();
34870fce5623SAvi Kivity 	kvm_arch_vcpu_blocking(vcpu);
348830c94347SSean Christopherson 	prepare_to_rcuwait(wait);
348929e72893SSean Christopherson 	preempt_enable();
349029e72893SSean Christopherson 
349130c94347SSean Christopherson 	for (;;) {
349230c94347SSean Christopherson 		set_current_state(TASK_INTERRUPTIBLE);
349330c94347SSean Christopherson 
349430c94347SSean Christopherson 		if (kvm_vcpu_check_block(vcpu) < 0)
349530c94347SSean Christopherson 			break;
349630c94347SSean Christopherson 
349730c94347SSean Christopherson 		waited = true;
349830c94347SSean Christopherson 		schedule();
349930c94347SSean Christopherson 	}
350030c94347SSean Christopherson 
350130c94347SSean Christopherson 	preempt_disable();
350230c94347SSean Christopherson 	finish_rcuwait(wait);
350330c94347SSean Christopherson 	kvm_arch_vcpu_unblocking(vcpu);
350430c94347SSean Christopherson 	preempt_enable();
3505e5c239cfSMarcelo Tosatti 
35060fce5623SAvi Kivity 	vcpu->stat.generic.blocking = 0;
3507175d5dc7SDavid Matlack 
3508175d5dc7SDavid Matlack 	return waited;
35099eb8ca04SDavid Matlack }
35109eb8ca04SDavid Matlack 
35119eb8ca04SDavid Matlack static inline void update_halt_poll_stats(struct kvm_vcpu *vcpu, ktime_t start,
35129eb8ca04SDavid Matlack 					  ktime_t end, bool success)
35139eb8ca04SDavid Matlack {
35149eb8ca04SDavid Matlack 	struct kvm_vcpu_stat_generic *stats = &vcpu->stat.generic;
35159eb8ca04SDavid Matlack 	u64 poll_ns = ktime_to_ns(ktime_sub(end, start));
35169eb8ca04SDavid Matlack 
35179eb8ca04SDavid Matlack 	++vcpu->stat.generic.halt_attempted_poll;
35189eb8ca04SDavid Matlack 
35199eb8ca04SDavid Matlack 	if (success) {
35209eb8ca04SDavid Matlack 		++vcpu->stat.generic.halt_successful_poll;
35219eb8ca04SDavid Matlack 
35229eb8ca04SDavid Matlack 		if (!vcpu_valid_wakeup(vcpu))
3523175d5dc7SDavid Matlack 			++vcpu->stat.generic.halt_poll_invalid;
3524175d5dc7SDavid Matlack 
3525fac42688SSean Christopherson 		stats->halt_poll_success_ns += poll_ns;
3526fac42688SSean Christopherson 		KVM_STATS_LOG_HIST_UPDATE(stats->halt_poll_success_hist, poll_ns);
3527fac42688SSean Christopherson 	} else {
3528fac42688SSean Christopherson 		stats->halt_poll_fail_ns += poll_ns;
3529fac42688SSean Christopherson 		KVM_STATS_LOG_HIST_UPDATE(stats->halt_poll_fail_hist, poll_ns);
3530fac42688SSean Christopherson 	}
353191b99ea7SSean Christopherson }
35320fce5623SAvi Kivity 
3533175d5dc7SDavid Matlack static unsigned int kvm_vcpu_max_halt_poll_ns(struct kvm_vcpu *vcpu)
35346f390916SSean Christopherson {
35350fce5623SAvi Kivity 	struct kvm *kvm = vcpu->kvm;
35360fce5623SAvi Kivity 
353797b6847aSDavid Matlack 	if (kvm->override_halt_poll_ns) {
353891b99ea7SSean Christopherson 		/*
35390fce5623SAvi Kivity 		 * Ensure kvm->max_halt_poll_ns is not read before
3540175d5dc7SDavid Matlack 		 * kvm->override_halt_poll_ns.
3541175d5dc7SDavid Matlack 		 *
354297b6847aSDavid Matlack 		 * Pairs with the smp_wmb() when enabling KVM_CAP_HALT_POLL.
354397b6847aSDavid Matlack 		 */
354497b6847aSDavid Matlack 		smp_rmb();
35450fce5623SAvi Kivity 		return READ_ONCE(kvm->max_halt_poll_ns);
35468df6a61cSSean Christopherson 	}
3547109a9826SSean Christopherson 
3548d255f4f2SZhai, Edwin 	return READ_ONCE(halt_poll_ns);
3549d255f4f2SZhai, Edwin }
355030c94347SSean Christopherson 
35510fce5623SAvi Kivity /*
35520fce5623SAvi Kivity  * Emulate a vCPU halt condition, e.g. HLT on x86, WFI on arm, etc...  If halt
35530fce5623SAvi Kivity  * polling is enabled, busy wait for a short time before blocking to avoid the
35540fce5623SAvi Kivity  * expensive block+unblock sequence if a wake event arrives soon after the vCPU
35550fce5623SAvi Kivity  * is halted.
35560fce5623SAvi Kivity  */
3557fac42688SSean Christopherson void kvm_vcpu_halt(struct kvm_vcpu *vcpu)
3558f6c60d08SSean Christopherson {
3559f7819512SPaolo Bonzini 	unsigned int max_halt_poll_ns = kvm_vcpu_max_halt_poll_ns(vcpu);
356087bcc5faSJing Zhang 	bool halt_poll_allowed = !kvm_arch_no_poll(vcpu);
356187bcc5faSJing Zhang 	ktime_t start, cur, poll_end;
356287bcc5faSJing Zhang 	bool waited = false;
35638ccba534SJing Zhang 	bool do_halt_poll;
35648ccba534SJing Zhang 	u64 halt_ns;
356587bcc5faSJing Zhang 
3566f7819512SPaolo Bonzini 	if (vcpu->halt_poll_ns > max_halt_poll_ns)
356791b99ea7SSean Christopherson 		vcpu->halt_poll_ns = max_halt_poll_ns;
356891b99ea7SSean Christopherson 
3569aca6ff29SWanpeng Li 	do_halt_poll = halt_poll_allowed && vcpu->halt_poll_ns;
357029e72893SSean Christopherson 
357129e72893SSean Christopherson 	start = cur = poll_end = ktime_get();
357229e72893SSean Christopherson 	if (do_halt_poll) {
357329e72893SSean Christopherson 		ktime_t stop = ktime_add_ns(start, vcpu->halt_poll_ns);
357429e72893SSean Christopherson 
35758df6a61cSSean Christopherson 		do {
357629e72893SSean Christopherson 			if (kvm_vcpu_check_block(vcpu) < 0)
3577cb953129SDavid Matlack 				goto out;
35786f390916SSean Christopherson 			cpu_relax();
3579175d5dc7SDavid Matlack 			poll_end = cur = ktime_get();
3580175d5dc7SDavid Matlack 		} while (kvm_vcpu_can_poll(cur, stop));
3581175d5dc7SDavid Matlack 	}
358244551b2fSWanpeng Li 
35832086d320SChristian Borntraeger 	waited = kvm_vcpu_block(vcpu);
3584175d5dc7SDavid Matlack 
358591b99ea7SSean Christopherson 	cur = ktime_get();
3586aca6ff29SWanpeng Li 	if (waited) {
3587aca6ff29SWanpeng Li 		vcpu->stat.generic.halt_wait_ns +=
3588acd05785SDavid Matlack 			ktime_to_ns(cur) - ktime_to_ns(poll_end);
3589175d5dc7SDavid Matlack 		KVM_STATS_LOG_HIST_UPDATE(vcpu->stat.generic.halt_wait_hist,
3590aca6ff29SWanpeng Li 				ktime_to_ns(cur) - ktime_to_ns(poll_end));
3591aca6ff29SWanpeng Li 	}
3592175d5dc7SDavid Matlack out:
3593175d5dc7SDavid Matlack 	/* The total time the vCPU was "halted", including polling time. */
3594aca6ff29SWanpeng Li 	halt_ns = ktime_to_ns(cur) - ktime_to_ns(start);
359544551b2fSWanpeng Li 
3596edb9272fSWanpeng Li 	/*
359744551b2fSWanpeng Li 	 * Note, halt-polling is considered successful so long as the vCPU was
359844551b2fSWanpeng Li 	 * never actually scheduled out, i.e. even if the wake event arrived
3599aca6ff29SWanpeng Li 	 * after the halt-polling loop itself, but before the full wait.
360091b99ea7SSean Christopherson 	 */
36010fce5623SAvi Kivity 	if (do_halt_poll)
360291b99ea7SSean Christopherson 		update_halt_poll_stats(vcpu, start, poll_end, !waited);
36030fce5623SAvi Kivity 
3604178f02ffSRadim Krčmář 	if (halt_poll_allowed) {
3605b6d33834SChristoffer Dall 		/* Recompute the max halt poll time in case it changed. */
3606d92a5d1cSSean Christopherson 		max_halt_poll_ns = kvm_vcpu_max_halt_poll_ns(vcpu);
3607d73eb57bSWanpeng Li 
36080193cc90SJing Zhang 		if (!vcpu_valid_wakeup(vcpu)) {
3609178f02ffSRadim Krčmář 			shrink_halt_poll_ns(vcpu);
3610b6d33834SChristoffer Dall 		} else if (max_halt_poll_ns) {
3611b6d33834SChristoffer Dall 			if (halt_ns <= vcpu->halt_poll_ns)
3612178f02ffSRadim Krčmář 				;
3613dd1a4cc1SRadim Krčmář 			/* we had a long block, shrink polling */
3614dd1a4cc1SRadim Krčmář 			else if (vcpu->halt_poll_ns &&
3615dd1a4cc1SRadim Krčmář 				 halt_ns > max_halt_poll_ns)
36160266c894SPaolo Bonzini 				shrink_halt_poll_ns(vcpu);
3617dd1a4cc1SRadim Krčmář 			/* we had a short halt and our poll time is too small */
3618dd1a4cc1SRadim Krčmář 			else if (vcpu->halt_poll_ns < max_halt_poll_ns &&
3619dd1a4cc1SRadim Krčmář 				 halt_ns < max_halt_poll_ns)
3620dd1a4cc1SRadim Krčmář 				grow_halt_poll_ns(vcpu);
3621dd1a4cc1SRadim Krčmář 		} else {
362285b64045SSean Christopherson 			vcpu->halt_poll_ns = 0;
3623dd1a4cc1SRadim Krčmář 		}
3624178f02ffSRadim Krčmář 	}
3625178f02ffSRadim Krčmář 
3626178f02ffSRadim Krčmář 	trace_kvm_vcpu_wakeup(halt_ns, waited, vcpu_valid_wakeup(vcpu));
3627aefdc2edSPaolo Bonzini }
3628aefdc2edSPaolo Bonzini EXPORT_SYMBOL_GPL(kvm_vcpu_halt);
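/*
 * The update logic above makes the polling window adaptive: a wakeup that
 * arrives within the window leaves it unchanged, a halt that had to block
 * despite a long window shrinks it, and a short halt that just missed the
 * window grows it, so frequently-woken vCPUs converge on polling while
 * mostly-idle vCPUs fall back to scheduling out.
 */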
3629aefdc2edSPaolo Bonzini 
3630aefdc2edSPaolo Bonzini bool kvm_vcpu_wake_up(struct kvm_vcpu *vcpu)
3631aefdc2edSPaolo Bonzini {
3632aefdc2edSPaolo Bonzini 	if (__kvm_vcpu_wake_up(vcpu)) {
3633aefdc2edSPaolo Bonzini 		WRITE_ONCE(vcpu->ready, true);
3634aefdc2edSPaolo Bonzini 		++vcpu->stat.generic.halt_wakeup;
3635aefdc2edSPaolo Bonzini 		return true;
3636aefdc2edSPaolo Bonzini 	}
3637aefdc2edSPaolo Bonzini 
3638aefdc2edSPaolo Bonzini 	return false;
3639aefdc2edSPaolo Bonzini }
364085b64045SSean Christopherson EXPORT_SYMBOL_GPL(kvm_vcpu_wake_up);
364185b64045SSean Christopherson 
364285b64045SSean Christopherson #ifndef CONFIG_S390
364385b64045SSean Christopherson /*
364485b64045SSean Christopherson  * Kick a sleeping VCPU, or a guest VCPU in guest mode, into host kernel mode.
364585b64045SSean Christopherson  */
364685b64045SSean Christopherson void kvm_vcpu_kick(struct kvm_vcpu *vcpu)
364785b64045SSean Christopherson {
364885b64045SSean Christopherson 	int me, cpu;
3649b6d33834SChristoffer Dall 
3650b6d33834SChristoffer Dall 	if (kvm_vcpu_wake_up(vcpu))
365185b64045SSean Christopherson 		return;
3652aefdc2edSPaolo Bonzini 
3653b6d33834SChristoffer Dall 	me = get_cpu();
3654b6d33834SChristoffer Dall 	/*
3655a20ed54dSYang Zhang 	 * The only state change done outside the vcpu mutex is IN_GUEST_MODE
36560266c894SPaolo Bonzini 	 * to EXITING_GUEST_MODE.  Therefore the moderately expensive "should
3657b6d33834SChristoffer Dall 	 * kick" check does not need atomic operations if kvm_vcpu_kick is used
3658fa93384fSDan Carpenter 	 * within the vCPU thread itself.
365941628d33SKonstantin Weitz 	 */
366041628d33SKonstantin Weitz 	if (vcpu == __this_cpu_read(kvm_running_vcpu)) {
366141628d33SKonstantin Weitz 		if (vcpu->mode == IN_GUEST_MODE)
3662fa93384fSDan Carpenter 			WRITE_ONCE(vcpu->mode, EXITING_GUEST_MODE);
366341628d33SKonstantin Weitz 		goto out;
366441628d33SKonstantin Weitz 	}
366541628d33SKonstantin Weitz 
366641628d33SKonstantin Weitz 	/*
366727fbe64bSSam Bobroff 	 * Note, the vCPU could get migrated to a different pCPU at any point
366841628d33SKonstantin Weitz 	 * after kvm_arch_vcpu_should_kick(), which could result in sending an
366941628d33SKonstantin Weitz 	 * IPI to the previous pCPU.  But, that's ok because the purpose of the
3670c45c528eSRaghavendra K T 	 * IPI is to force the vCPU to leave IN_GUEST_MODE, and migrating the
3671c45c528eSRaghavendra K T 	 * vCPU also requires it to leave IN_GUEST_MODE.
367241628d33SKonstantin Weitz 	 */
3673c45c528eSRaghavendra K T 	if (kvm_arch_vcpu_should_kick(vcpu)) {
3674c45c528eSRaghavendra K T 		cpu = READ_ONCE(vcpu->cpu);
367541628d33SKonstantin Weitz 		if (cpu != me && (unsigned)cpu < nr_cpu_ids && cpu_online(cpu))
367641628d33SKonstantin Weitz 			smp_send_reschedule(cpu);
367741628d33SKonstantin Weitz 	}
367806e48c51SRaghavendra K T out:
367906e48c51SRaghavendra K T 	put_cpu();
368006e48c51SRaghavendra K T }
368106e48c51SRaghavendra K T EXPORT_SYMBOL_GPL(kvm_vcpu_kick);
368206e48c51SRaghavendra K T #endif /* !CONFIG_S390 */
368306e48c51SRaghavendra K T 
3684656012c7SFuad Tabba int kvm_vcpu_yield_to(struct kvm_vcpu *target)
368506e48c51SRaghavendra K T {
368606e48c51SRaghavendra K T 	struct pid *pid;
368706e48c51SRaghavendra K T 	struct task_struct *task = NULL;
368806e48c51SRaghavendra K T 	int ret = 0;
368906e48c51SRaghavendra K T 
369006e48c51SRaghavendra K T 	rcu_read_lock();
369106e48c51SRaghavendra K T 	pid = rcu_dereference(target->pid);
369206e48c51SRaghavendra K T 	if (pid)
369306e48c51SRaghavendra K T 		task = get_pid_task(pid, PIDTYPE_PID);
369406e48c51SRaghavendra K T 	rcu_read_unlock();
369506e48c51SRaghavendra K T 	if (!task)
369606e48c51SRaghavendra K T 		return ret;
369706e48c51SRaghavendra K T 	ret = yield_to(task, 1);
369806e48c51SRaghavendra K T 	put_task_struct(task);
369906e48c51SRaghavendra K T 
37007940876eSStephen Hemminger 	return ret;
370106e48c51SRaghavendra K T }
37024a55dd72SScott Wood EXPORT_SYMBOL_GPL(kvm_vcpu_yield_to);
370306e48c51SRaghavendra K T 
370406e48c51SRaghavendra K T /*
370506e48c51SRaghavendra K T  * Helper that checks whether a VCPU is eligible for directed yield.
370634656113SChristian Borntraeger  * The most eligible candidate to yield to is decided by the following heuristics:
370706e48c51SRaghavendra K T  *
370806e48c51SRaghavendra K T  *  (a) VCPU which has not done pl-exit or cpu relax intercepted recently
370906e48c51SRaghavendra K T  *  (preempted lock holder), indicated by @in_spin_loop.
371006e48c51SRaghavendra K T  *  Set at the beginning and cleared at the end of interception/PLE handler.
371106e48c51SRaghavendra K T  *
37124a55dd72SScott Wood  *  (b) VCPU which has done pl-exit/cpu relax intercepted but did not get a
37134a55dd72SScott Wood  *  chance last time (mostly it has become eligible now since we have probably
371406e48c51SRaghavendra K T  *  yielded to lockholder in last iteration. This is done by toggling
37154a55dd72SScott Wood  *  @dy_eligible each time a VCPU is checked for eligibility.)
3716c45c528eSRaghavendra K T  *
371717e433b5SWanpeng Li  *  Yielding to a recently pl-exited/cpu relax intercepted VCPU before yielding
371817e433b5SWanpeng Li  *  to a preempted lock-holder could result in wrong VCPU selection and CPU
371917e433b5SWanpeng Li  *  burning. Giving priority to a potential lock-holder increases lock
372017e433b5SWanpeng Li  *  progress.
372117e433b5SWanpeng Li  *
372217e433b5SWanpeng Li  *  Since the algorithm is based on heuristics, accessing another VCPU's data without
372317e433b5SWanpeng Li  *  locking does no harm. It may result in trying to yield to the same VCPU, fail
372417e433b5SWanpeng Li  *  and continue with next VCPU and so on.
372517e433b5SWanpeng Li  */
372617e433b5SWanpeng Li static bool kvm_vcpu_eligible_for_directed_yield(struct kvm_vcpu *vcpu)
372717e433b5SWanpeng Li {
372817e433b5SWanpeng Li #ifdef CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT
372917e433b5SWanpeng Li 	bool eligible;
373017e433b5SWanpeng Li 
373117e433b5SWanpeng Li 	eligible = !vcpu->spin_loop.in_spin_loop ||
373217e433b5SWanpeng Li 		    vcpu->spin_loop.dy_eligible;
373317e433b5SWanpeng Li 
373417e433b5SWanpeng Li 	if (vcpu->spin_loop.in_spin_loop)
373517e433b5SWanpeng Li 		kvm_vcpu_set_dy_eligible(vcpu, !vcpu->spin_loop.dy_eligible);
373617e433b5SWanpeng Li 
373717e433b5SWanpeng Li 	return eligible;
373817e433b5SWanpeng Li #else
373917e433b5SWanpeng Li 	return true;
374052acd22fSWanpeng Li #endif
374152acd22fSWanpeng Li }
374252acd22fSWanpeng Li 
374352acd22fSWanpeng Li /*
374452acd22fSWanpeng Li  * Unlike kvm_arch_vcpu_runnable, this function is called outside
3745199b5763SLongpeng(Mike)  * a vcpu_load/vcpu_put pair.  However, for most architectures
3746d255f4f2SZhai, Edwin  * kvm_arch_vcpu_runnable does not require vcpu_load.
3747217ece61SRik van Riel  */
3748217ece61SRik van Riel bool __weak kvm_arch_dy_runnable(struct kvm_vcpu *vcpu)
3749217ece61SRik van Riel {
375046808a4cSMarc Zyngier 	return kvm_arch_vcpu_runnable(vcpu);
3751217ece61SRik van Riel }
3752c45c528eSRaghavendra K T 
3753217ece61SRik van Riel static bool vcpu_dy_runnable(struct kvm_vcpu *vcpu)
3754d255f4f2SZhai, Edwin {
37554c088493SRaghavendra K T 	if (kvm_arch_dy_runnable(vcpu))
3756217ece61SRik van Riel 		return true;
3757217ece61SRik van Riel 
3758217ece61SRik van Riel #ifdef CONFIG_KVM_ASYNC_PF
3759217ece61SRik van Riel 	if (!list_empty_careful(&vcpu->async_pf.done))
3760217ece61SRik van Riel 		return true;
3761217ece61SRik van Riel #endif
3762217ece61SRik van Riel 
3763c45c528eSRaghavendra K T 	return false;
3764217ece61SRik van Riel }
37655cfc2aabSRik van Riel 
3766217ece61SRik van Riel bool __weak kvm_arch_dy_has_pending_interrupt(struct kvm_vcpu *vcpu)
3767217ece61SRik van Riel {
3768217ece61SRik van Riel 	return false;
3769217ece61SRik van Riel }
3770d73eb57bSWanpeng Li 
37717bc7ae25SRaghavendra K T void kvm_vcpu_on_spin(struct kvm_vcpu *me, bool yield_to_kernel_mode)
3772217ece61SRik van Riel {
3773217ece61SRik van Riel 	struct kvm *kvm = me->kvm;
3774d92a5d1cSSean Christopherson 	struct kvm_vcpu *vcpu;
3775217ece61SRik van Riel 	int last_boosted_vcpu = me->kvm->last_boosted_vcpu;
3776046ddeedSWanpeng Li 	unsigned long i;
377752acd22fSWanpeng Li 	int yielded = 0;
3778046ddeedSWanpeng Li 	int try = 3;
3779199b5763SLongpeng(Mike) 	int pass;
378006e48c51SRaghavendra K T 
378106e48c51SRaghavendra K T 	kvm_vcpu_set_in_spin_loop(me, true);
3782c45c528eSRaghavendra K T 	/*
3783c45c528eSRaghavendra K T 	 * We boost the priority of a VCPU that is runnable but not
3784c45c528eSRaghavendra K T 	 * currently running, because it got preempted by something
3785217ece61SRik van Riel 	 * else and called schedule in __vcpu_run.  Hopefully that
3786c45c528eSRaghavendra K T 	 * VCPU is holding the lock that we need and will release it.
3787c45c528eSRaghavendra K T 	 * We approximate round-robin by starting at the last boosted VCPU.
3788c45c528eSRaghavendra K T 	 */
3789c45c528eSRaghavendra K T 	for (pass = 0; pass < 2 && !yielded && try; pass++) {
3790217ece61SRik van Riel 		kvm_for_each_vcpu(i, vcpu, kvm) {
3791217ece61SRik van Riel 			if (!pass && i <= last_boosted_vcpu) {
3792217ece61SRik van Riel 				i = last_boosted_vcpu;
3793217ece61SRik van Riel 				continue;
37944c088493SRaghavendra K T 			} else if (pass && i > last_boosted_vcpu)
379506e48c51SRaghavendra K T 				break;
379606e48c51SRaghavendra K T 			if (!READ_ONCE(vcpu->ready))
379706e48c51SRaghavendra K T 				continue;
3798d255f4f2SZhai, Edwin 			if (vcpu == me)
3799d255f4f2SZhai, Edwin 				continue;
3800d255f4f2SZhai, Edwin 			if (kvm_vcpu_is_blocking(vcpu) && !vcpu_dy_runnable(vcpu))
3801fb04a1edSPeter Xu 				continue;
3802fb04a1edSPeter Xu 			if (READ_ONCE(vcpu->preempted) && yield_to_kernel_mode &&
3803dc70ec21SDavid Woodhouse 			    !kvm_arch_dy_has_pending_interrupt(vcpu) &&
3804fb04a1edSPeter Xu 			    !kvm_arch_vcpu_in_kernel(vcpu))
3805fb04a1edSPeter Xu 				continue;
3806fb04a1edSPeter Xu 			if (!kvm_vcpu_eligible_for_directed_yield(vcpu))
3807fb04a1edSPeter Xu 				continue;
3808fb04a1edSPeter Xu 
3809fb04a1edSPeter Xu 			yielded = kvm_vcpu_yield_to(vcpu);
3810fb04a1edSPeter Xu 			if (yielded > 0) {
3811fb04a1edSPeter Xu 				kvm->last_boosted_vcpu = i;
38121499fa80SSouptick Joarder 				break;
38130fce5623SAvi Kivity 			} else if (yielded < 0) {
381411bac800SDave Jiang 				try--;
38150fce5623SAvi Kivity 				if (!try)
38160fce5623SAvi Kivity 					break;
38170fce5623SAvi Kivity 			}
38180fce5623SAvi Kivity 		}
381909566765SAvi Kivity 	}
38200fce5623SAvi Kivity 	kvm_vcpu_set_in_spin_loop(me, false);
38210fce5623SAvi Kivity 
382209566765SAvi Kivity 	/* Ensure vcpu is not eligible during next spinloop */
38234b4357e0SPaolo Bonzini 	kvm_vcpu_set_dy_eligible(me, false);
38245f94c174SLaurent Vivier }
38255f94c174SLaurent Vivier EXPORT_SYMBOL_GPL(kvm_vcpu_on_spin);
38265f94c174SLaurent Vivier 
3827fb04a1edSPeter Xu static bool kvm_page_in_dirty_ring(struct kvm *kvm, unsigned long pgoff)
3828fb04a1edSPeter Xu {
3829fb04a1edSPeter Xu #ifdef CONFIG_HAVE_KVM_DIRTY_RING
3830fb04a1edSPeter Xu 	return (pgoff >= KVM_DIRTY_LOG_PAGE_OFFSET) &&
38310fce5623SAvi Kivity 	    (pgoff < KVM_DIRTY_LOG_PAGE_OFFSET +
38325b1c1493SCarsten Otte 	     kvm->dirty_ring_size / PAGE_SIZE);
38330fce5623SAvi Kivity #else
38340fce5623SAvi Kivity 	return false;
38350fce5623SAvi Kivity #endif
38360fce5623SAvi Kivity }
38370fce5623SAvi Kivity 
3838f0f37e2fSAlexey Dobriyan static vm_fault_t kvm_vcpu_fault(struct vm_fault *vmf)
38390fce5623SAvi Kivity {
38400fce5623SAvi Kivity 	struct kvm_vcpu *vcpu = vmf->vma->vm_file->private_data;
38410fce5623SAvi Kivity 	struct page *page;
38420fce5623SAvi Kivity 
38430fce5623SAvi Kivity 	if (vmf->pgoff == 0)
3844fb04a1edSPeter Xu 		page = virt_to_page(vcpu->run);
384511476d27SYang Li #ifdef CONFIG_X86
3846fb04a1edSPeter Xu 	else if (vmf->pgoff == KVM_PIO_PAGE_OFFSET)
3847fb04a1edSPeter Xu 		page = virt_to_page(vcpu->arch.pio_data);
3848fb04a1edSPeter Xu #endif
3849fb04a1edSPeter Xu #ifdef CONFIG_KVM_MMIO
3850fb04a1edSPeter Xu 	else if (vmf->pgoff == KVM_COALESCED_MMIO_PAGE_OFFSET)
3851fb04a1edSPeter Xu 		page = virt_to_page(vcpu->kvm->coalesced_mmio_ring);
38520fce5623SAvi Kivity #endif
38530fce5623SAvi Kivity 	else if (kvm_page_in_dirty_ring(vcpu->kvm, vmf->pgoff))
38540fce5623SAvi Kivity 		page = kvm_dirty_ring_get_page(
38550fce5623SAvi Kivity 		    &vcpu->dirty_ring,
38560fce5623SAvi Kivity 		    vmf->pgoff - KVM_DIRTY_LOG_PAGE_OFFSET);
38570fce5623SAvi Kivity 	else
38580fce5623SAvi Kivity 		return kvm_arch_vcpu_fault(vcpu, vmf);
38590fce5623SAvi Kivity 	get_page(page);
386066c0b394SAl Viro 	vmf->page = page;
38610fce5623SAvi Kivity 	return 0;
38620fce5623SAvi Kivity }
38630fce5623SAvi Kivity 
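/*
 * Layout of the vCPU mmap served by kvm_vcpu_fault() above: page 0 is the
 * kvm_run structure, KVM_PIO_PAGE_OFFSET (x86 only) and
 * KVM_COALESCED_MMIO_PAGE_OFFSET map the PIO and coalesced-MMIO buffers, and
 * the pages starting at KVM_DIRTY_LOG_PAGE_OFFSET back the dirty ring; any
 * other offset is forwarded to kvm_arch_vcpu_fault().
 */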
386470375c2dSDavid Matlack static const struct vm_operations_struct kvm_vcpu_vm_ops = {
38650fce5623SAvi Kivity 	.fault = kvm_vcpu_fault,
38660fce5623SAvi Kivity };
38670fce5623SAvi Kivity 
38686038f373SArnd Bergmann static int kvm_vcpu_mmap(struct file *file, struct vm_area_struct *vma)
38697ddfd3e0SMarc Zyngier {
38700fce5623SAvi Kivity 	struct kvm_vcpu *vcpu = file->private_data;
38710fce5623SAvi Kivity 	unsigned long pages = vma_pages(vma);
38720fce5623SAvi Kivity 
38730fce5623SAvi Kivity 	if ((kvm_page_in_dirty_ring(vcpu->kvm, vma->vm_pgoff) ||
38740fce5623SAvi Kivity 	     kvm_page_in_dirty_ring(vcpu->kvm, vma->vm_pgoff + pages - 1)) &&
38750fce5623SAvi Kivity 	    ((vma->vm_flags & VM_EXEC) || !(vma->vm_flags & VM_SHARED)))
38760fce5623SAvi Kivity 		return -EINVAL;
3877e46b4692SMasatake YAMATO 
3878e46b4692SMasatake YAMATO 	vma->vm_ops = &kvm_vcpu_vm_ops;
3879e46b4692SMasatake YAMATO 	return 0;
3880e46b4692SMasatake YAMATO }
38810fce5623SAvi Kivity 
38820fce5623SAvi Kivity static int kvm_vcpu_release(struct inode *inode, struct file *filp)
3883e36de87dSVineeth Pillai {
3884e36de87dSVineeth Pillai 	struct kvm_vcpu *vcpu = filp->private_data;
3885e36de87dSVineeth Pillai 
388614aa40a1SLi kunyu 	kvm_put_kvm(vcpu->kvm);
388776021e96SSean Christopherson 	return 0;
388876021e96SSean Christopherson }
388976021e96SSean Christopherson 
389076021e96SSean Christopherson static const struct file_operations kvm_vcpu_fops = {
3891e36de87dSVineeth Pillai 	.release        = kvm_vcpu_release,
3892e36de87dSVineeth Pillai 	.unlocked_ioctl = kvm_vcpu_ioctl,
3893e36de87dSVineeth Pillai 	.mmap           = kvm_vcpu_mmap,
3894e36de87dSVineeth Pillai 	.llseek		= noop_llseek,
3895e36de87dSVineeth Pillai 	KVM_COMPAT(kvm_vcpu_compat_ioctl),
38963e7093d0SGreg KH };
389745b5939eSLuiz Capitulino 
3898d56f5136SPaolo Bonzini /*
389945b5939eSLuiz Capitulino  * Allocates an inode for the vcpu.
390045b5939eSLuiz Capitulino  */
390145b5939eSLuiz Capitulino static int create_vcpu_fd(struct kvm_vcpu *vcpu)
39023e7093d0SGreg KH {
390345b5939eSLuiz Capitulino 	char name[8 + 1 + ITOA_MAX_LEN + 1];
390445b5939eSLuiz Capitulino 
3905d56f5136SPaolo Bonzini 	snprintf(name, sizeof(name), "kvm-vcpu:%d", vcpu->vcpu_id);
390645b5939eSLuiz Capitulino 	return anon_inode_getfd(name, &kvm_vcpu_fops, vcpu, O_RDWR | O_CLOEXEC);
3907e36de87dSVineeth Pillai }
3908e36de87dSVineeth Pillai 
390945b5939eSLuiz Capitulino #ifdef __KVM_HAVE_ARCH_VCPU_DEBUGFS
3910d56f5136SPaolo Bonzini static int vcpu_get_pid(void *data, u64 *val)
391145b5939eSLuiz Capitulino {
3912e36de87dSVineeth Pillai 	struct kvm_vcpu *vcpu = data;
391345b5939eSLuiz Capitulino 
39140fce5623SAvi Kivity 	rcu_read_lock();
39150fce5623SAvi Kivity 	*val = pid_nr(rcu_dereference(vcpu->pid));
39160fce5623SAvi Kivity 	rcu_read_unlock();
391773880c80SGleb Natapov 	return 0;
39180fce5623SAvi Kivity }
39190fce5623SAvi Kivity 
3920e09fefdeSDavid Hildenbrand DEFINE_SIMPLE_ATTRIBUTE(vcpu_get_pid_fops, vcpu_get_pid, NULL, "%llu\n");
39218bd826d6SSean Christopherson 
39220fce5623SAvi Kivity static void kvm_create_vcpu_debugfs(struct kvm_vcpu *vcpu)
3923a1c42ddeSJuergen Gross {
3924338c7dbaSAndy Honig 	struct dentry *debugfs_dentry;
3925338c7dbaSAndy Honig 	char dir_name[ITOA_MAX_LEN * 2];
39266c7caebcSPaolo Bonzini 
3927f502cc56SSean Christopherson 	if (!debugfs_initialized())
39286c7caebcSPaolo Bonzini 		return;
39296c7caebcSPaolo Bonzini 
39306c7caebcSPaolo Bonzini 	snprintf(dir_name, sizeof(dir_name), "vcpu%d", vcpu->vcpu_id);
39316c7caebcSPaolo Bonzini 	debugfs_dentry = debugfs_create_dir(dir_name,
39321d5e740dSZeng Guang 					    vcpu->kvm->debugfs_dentry);
39331d5e740dSZeng Guang 	debugfs_create_file("pid", 0444, debugfs_dentry, vcpu,
39341d5e740dSZeng Guang 			    &vcpu_get_pid_fops);
39351d5e740dSZeng Guang 
39361d5e740dSZeng Guang 	kvm_arch_create_vcpu_debugfs(vcpu, debugfs_dentry);
39371d5e740dSZeng Guang }
39386c7caebcSPaolo Bonzini #endif
39396c7caebcSPaolo Bonzini 
39406c7caebcSPaolo Bonzini /*
394185f47930SSean Christopherson  * Creates some virtual cpus.  Good luck creating more than one.
3942e529ef66SSean Christopherson  */
3943e529ef66SSean Christopherson static int kvm_vm_ioctl_create_vcpu(struct kvm *kvm, u32 id)
39446c7caebcSPaolo Bonzini {
39456c7caebcSPaolo Bonzini 	int r;
39460fce5623SAvi Kivity 	struct kvm_vcpu *vcpu;
3947fcd97ad5SPeter Xu 	struct page *page;
394893bb59caSShakeel Butt 
39498bd826d6SSean Christopherson 	if (id >= KVM_MAX_VCPU_IDS)
39508bd826d6SSean Christopherson 		return -EINVAL;
3951e529ef66SSean Christopherson 
39528bd826d6SSean Christopherson 	mutex_lock(&kvm->lock);
39538bd826d6SSean Christopherson 	if (kvm->created_vcpus >= kvm->max_vcpus) {
39548bd826d6SSean Christopherson 		mutex_unlock(&kvm->lock);
39558bd826d6SSean Christopherson 		return -EINVAL;
3956e529ef66SSean Christopherson 	}
3957e529ef66SSean Christopherson 
3958e529ef66SSean Christopherson 	r = kvm_arch_vcpu_precreate(kvm, id);
39598bd826d6SSean Christopherson 	if (r) {
3960e529ef66SSean Christopherson 		mutex_unlock(&kvm->lock);
3961fb04a1edSPeter Xu 		return r;
3962fb04a1edSPeter Xu 	}
3963fb04a1edSPeter Xu 
3964fb04a1edSPeter Xu 	kvm->created_vcpus++;
3965fb04a1edSPeter Xu 	mutex_unlock(&kvm->lock);
3966fb04a1edSPeter Xu 
3967fb04a1edSPeter Xu 	vcpu = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL_ACCOUNT);
39680fce5623SAvi Kivity 	if (!vcpu) {
396942a90008SDavid Woodhouse 		r = -ENOMEM;
397042a90008SDavid Woodhouse 		goto vcpu_decrement;
397142a90008SDavid Woodhouse 	}
397242a90008SDavid Woodhouse 
397342a90008SDavid Woodhouse 	BUILD_BUG_ON(sizeof(struct kvm_run) > PAGE_SIZE);
397442a90008SDavid Woodhouse 	page = alloc_page(GFP_KERNEL_ACCOUNT | __GFP_ZERO);
397542a90008SDavid Woodhouse 	if (!page) {
3976e09fefdeSDavid Hildenbrand 		r = -ENOMEM;
39770fce5623SAvi Kivity 		goto vcpu_free;
3978d780592bSJan Kiszka 	}
39790fce5623SAvi Kivity 	vcpu->run = page_address(page);
398073880c80SGleb Natapov 
39818750e72aSRadim Krčmář 	kvm_vcpu_init(vcpu, kvm, id);
3982afb2acb2SMichal Luczaj 
3983c5b07754SMarc Zyngier 	r = kvm_arch_vcpu_create(vcpu);
3984c5b07754SMarc Zyngier 	if (r)
39850fce5623SAvi Kivity 		goto vcpu_free_run_page;
39860fce5623SAvi Kivity 
398766c0b394SAl Viro 	if (kvm->dirty_ring_size) {
39880fce5623SAvi Kivity 		r = kvm_dirty_ring_alloc(&vcpu->dirty_ring,
3989afb2acb2SMichal Luczaj 					 id, kvm->dirty_ring_size);
3990afb2acb2SMichal Luczaj 		if (r)
3991afb2acb2SMichal Luczaj 			goto arch_vcpu_destroy;
39925f643e46SMichal Luczaj 	}
3993afb2acb2SMichal Luczaj 
3994afb2acb2SMichal Luczaj 	mutex_lock(&kvm->lock);
399573880c80SGleb Natapov 
399673880c80SGleb Natapov #ifdef CONFIG_LOCKDEP
3997dd489240SPaolo Bonzini 	/* Ensure that lockdep knows vcpu->mutex is taken *inside* kvm->lock */
3998c5b07754SMarc Zyngier 	mutex_lock(&vcpu->mutex);
3999c5b07754SMarc Zyngier 	mutex_unlock(&vcpu->mutex);
4000dd489240SPaolo Bonzini #endif
400173880c80SGleb Natapov 
400273880c80SGleb Natapov 	if (kvm_get_vcpu_by_id(kvm, id)) {
400373880c80SGleb Natapov 		r = -EEXIST;
400473880c80SGleb Natapov 		goto unlock_vcpu_destroy;
400542897d86SMarcelo Tosatti 	}
400663d04348SPaolo Bonzini 
40070fce5623SAvi Kivity 	vcpu->vcpu_idx = atomic_read(&kvm->online_vcpus);
40080fce5623SAvi Kivity 	r = xa_reserve(&kvm->vcpu_array, vcpu->vcpu_idx, GFP_KERNEL_ACCOUNT);
4009afb2acb2SMichal Luczaj 	if (r)
4010afb2acb2SMichal Luczaj 		goto unlock_vcpu_destroy;
4011afb2acb2SMichal Luczaj 
4012d780592bSJan Kiszka 	/* Now it's all set up, let userspace reach it */
40137d8fece6SGlauber Costa 	kvm_get_kvm(kvm);
4014fb04a1edSPeter Xu 	r = create_vcpu_fd(vcpu);
4015fb04a1edSPeter Xu 	if (r < 0)
40160fce5623SAvi Kivity 		goto kvm_put_xa_release;
40178bd826d6SSean Christopherson 
40188bd826d6SSean Christopherson 	if (KVM_BUG_ON(xa_store(&kvm->vcpu_array, vcpu->vcpu_idx, vcpu, 0), kvm)) {
4019e529ef66SSean Christopherson 		r = -EINVAL;
4020e529ef66SSean Christopherson 		goto kvm_put_xa_release;
40216c7caebcSPaolo Bonzini 	}
40226c7caebcSPaolo Bonzini 
40236c7caebcSPaolo Bonzini 	/*
40246c7caebcSPaolo Bonzini 	 * Pairs with smp_rmb() in kvm_get_vcpu.  Store the vcpu
40250fce5623SAvi Kivity 	 * pointer before kvm->online_vcpu's incremented value.
40260fce5623SAvi Kivity 	 */
40270fce5623SAvi Kivity 	smp_wmb();
40280fce5623SAvi Kivity 	atomic_inc(&kvm->online_vcpus);
40290fce5623SAvi Kivity 
40300fce5623SAvi Kivity 	mutex_unlock(&kvm->lock);
40310fce5623SAvi Kivity 	kvm_arch_vcpu_postcreate(vcpu);
40320fce5623SAvi Kivity 	kvm_create_vcpu_debugfs(vcpu);
40330fce5623SAvi Kivity 	return r;
40340fce5623SAvi Kivity 
40350fce5623SAvi Kivity kvm_put_xa_release:
40360fce5623SAvi Kivity 	kvm_put_kvm_no_destroy(kvm);
40370fce5623SAvi Kivity 	xa_release(&kvm->vcpu_array, vcpu->vcpu_idx);
40380fce5623SAvi Kivity unlock_vcpu_destroy:
4039ce55c049SJing Zhang 	mutex_unlock(&kvm->lock);
4040ce55c049SJing Zhang 	kvm_dirty_ring_free(&vcpu->dirty_ring);
4041ce55c049SJing Zhang arch_vcpu_destroy:
4042ce55c049SJing Zhang 	kvm_arch_vcpu_destroy(vcpu);
4043ce55c049SJing Zhang vcpu_free_run_page:
4044ce55c049SJing Zhang 	free_page((unsigned long)vcpu->run);
4045ce55c049SJing Zhang vcpu_free:
4046ce55c049SJing Zhang 	kmem_cache_free(kvm_vcpu_cache, vcpu);
4047ce55c049SJing Zhang vcpu_decrement:
4048ce55c049SJing Zhang 	mutex_lock(&kvm->lock);
4049eed3013fSSean Christopherson 	kvm->created_vcpus--;
4050eed3013fSSean Christopherson 	mutex_unlock(&kvm->lock);
4051eed3013fSSean Christopherson 	return r;
4052eed3013fSSean Christopherson }
4053eed3013fSSean Christopherson 
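/*
 * Illustrative userspace sketch (not part of this file): a vCPU is created
 * with the KVM_CREATE_VCPU ioctl on a VM fd, passing the vCPU id as the
 * argument; the returned fd is the anonymous inode installed by
 * create_vcpu_fd() above.  Error handling is omitted.
 *
 *	int vcpu_fd = ioctl(vm_fd, KVM_CREATE_VCPU, (unsigned long)0);
 *
 * The id must be below KVM_MAX_VCPU_IDS, and creating the same id twice
 * fails with -EEXIST, mirroring the checks in kvm_vm_ioctl_create_vcpu().
 */
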
4054eed3013fSSean Christopherson static int kvm_vcpu_ioctl_set_sigmask(struct kvm_vcpu *vcpu, sigset_t *sigset)
4055eed3013fSSean Christopherson {
4056eed3013fSSean Christopherson 	if (sigset) {
4057ce55c049SJing Zhang 		sigdelsetmask(sigset, sigmask(SIGKILL)|sigmask(SIGSTOP));
4058ce55c049SJing Zhang 		vcpu->sigset_active = 1;
4059eed3013fSSean Christopherson 		vcpu->sigset = *sigset;
4060ce55c049SJing Zhang 	} else
4061ce55c049SJing Zhang 		vcpu->sigset_active = 0;
4062ce55c049SJing Zhang 	return 0;
4063ce55c049SJing Zhang }
4064ce55c049SJing Zhang 
4065ce55c049SJing Zhang static ssize_t kvm_vcpu_stats_read(struct file *file, char __user *user_buffer,
4066ce55c049SJing Zhang 			      size_t size, loff_t *offset)
4067ce55c049SJing Zhang {
4068ce55c049SJing Zhang 	struct kvm_vcpu *vcpu = file->private_data;
4069ce55c049SJing Zhang 
4070ce55c049SJing Zhang 	return kvm_stats_read(vcpu->stats_id, &kvm_vcpu_stats_header,
4071ce55c049SJing Zhang 			&kvm_vcpu_stats_desc[0], &vcpu->stat,
4072ce55c049SJing Zhang 			sizeof(vcpu->stat), user_buffer, size, offset);
4073ce55c049SJing Zhang }
4074ce55c049SJing Zhang 
4075ce55c049SJing Zhang static int kvm_vcpu_stats_release(struct inode *inode, struct file *file)
4076ce55c049SJing Zhang {
4077ce55c049SJing Zhang 	struct kvm_vcpu *vcpu = file->private_data;
4078ce55c049SJing Zhang 
4079ce55c049SJing Zhang 	kvm_put_kvm(vcpu->kvm);
4080eed3013fSSean Christopherson 	return 0;
4081eed3013fSSean Christopherson }
4082eed3013fSSean Christopherson 
4083ce55c049SJing Zhang static const struct file_operations kvm_vcpu_stats_fops = {
4084ce55c049SJing Zhang 	.read = kvm_vcpu_stats_read,
4085ce55c049SJing Zhang 	.release = kvm_vcpu_stats_release,
4086ce55c049SJing Zhang 	.llseek = noop_llseek,
4087ce55c049SJing Zhang };
4088ce55c049SJing Zhang 
40890fce5623SAvi Kivity static int kvm_vcpu_ioctl_get_stats_fd(struct kvm_vcpu *vcpu)
40900fce5623SAvi Kivity {
40910fce5623SAvi Kivity 	int fd;
40920fce5623SAvi Kivity 	struct file *file;
40930fce5623SAvi Kivity 	char name[15 + ITOA_MAX_LEN + 1];
40940fce5623SAvi Kivity 
4095fa3795a7SDave Hansen 	snprintf(name, sizeof(name), "kvm-vcpu-stats:%d", vcpu->vcpu_id);
4096fa3795a7SDave Hansen 
40970fce5623SAvi Kivity 	fd = get_unused_fd_flags(O_CLOEXEC);
4098f4d31653SPaolo Bonzini 	if (fd < 0)
40990fce5623SAvi Kivity 		return fd;
41002122ff5eSAvi Kivity 
41012ea75be3SDavid Matlack 	file = anon_inode_getfile(name, &kvm_vcpu_stats_fops, vcpu, O_RDONLY);
41022ea75be3SDavid Matlack 	if (IS_ERR(file)) {
41032ea75be3SDavid Matlack 		put_unused_fd(fd);
41042122ff5eSAvi Kivity 		return PTR_ERR(file);
41055cb0944cSPaolo Bonzini 	}
41065cb0944cSPaolo Bonzini 
41072122ff5eSAvi Kivity 	kvm_get_kvm(vcpu->kvm);
41085cb0944cSPaolo Bonzini 
41095cb0944cSPaolo Bonzini 	file->f_mode |= FMODE_PREAD;
41109fc77441SMichael S. Tsirkin 	fd_install(fd, file);
41112122ff5eSAvi Kivity 
4112ec7660ccSChristoffer Dall 	return fd;
4113ec7660ccSChristoffer Dall }
41140fce5623SAvi Kivity 
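/*
 * Illustrative userspace sketch (not part of this file): KVM_GET_STATS_FD
 * returns a read-only fd whose contents start with a struct kvm_stats_header
 * (as defined in the UAPI headers), followed by the stat descriptors and the
 * stat data served by kvm_vcpu_stats_read() via kvm_stats_read().  Error
 * handling is omitted.
 *
 *	int stats_fd = ioctl(vcpu_fd, KVM_GET_STATS_FD, NULL);
 *	struct kvm_stats_header hdr;
 *	pread(stats_fd, &hdr, sizeof(hdr), 0);
 */
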
41150e4524a5SChristian Borntraeger static long kvm_vcpu_ioctl(struct file *filp,
41160e4524a5SChristian Borntraeger 			   unsigned int ioctl, unsigned long arg)
41170fce5623SAvi Kivity {
41180fce5623SAvi Kivity 	struct kvm_vcpu *vcpu = filp->private_data;
41190fce5623SAvi Kivity 	void __user *argp = (void __user *)arg;
41200e4524a5SChristian Borntraeger 	int r;
412171dbc8a9SEric W. Biederman 	struct kvm_fpu *fpu = NULL;
41227a72f7a1SChristian Borntraeger 	struct kvm_sregs *kvm_sregs = NULL;
4123bd2a6394SChristoffer Dall 
4124f95ef0cdSXiubo Li 	if (vcpu->kvm->mm != current->mm || vcpu->kvm->vm_dead)
4125bd2a6394SChristoffer Dall 		return -EIO;
4126bd2a6394SChristoffer Dall 
4127bd2a6394SChristoffer Dall 	if (unlikely(_IOC_TYPE(ioctl) != KVMIO))
4128bd2a6394SChristoffer Dall 		return -EINVAL;
4129bd2a6394SChristoffer Dall 
41307a72f7a1SChristian Borntraeger 	/*
41317a72f7a1SChristian Borntraeger 	 * Some architectures have vcpu ioctls that are asynchronous to vcpu
41327a72f7a1SChristian Borntraeger 	 * execution; mutex_lock() would break them.
41337a72f7a1SChristian Borntraeger 	 */
41347a72f7a1SChristian Borntraeger 	r = kvm_arch_vcpu_async_ioctl(filp, ioctl, arg);
41351b94f6f8STianjia Zhang 	if (r != -ENOIOCTLCMD)
413664be5007SGleb Natapov 		return r;
41370fce5623SAvi Kivity 
41380e4524a5SChristian Borntraeger 	if (mutex_lock_killable(&vcpu->mutex))
41390fce5623SAvi Kivity 		return -EINTR;
41403e4bb3acSXiantao Zhang 	switch (ioctl) {
41410fce5623SAvi Kivity 	case KVM_RUN: {
41423e4bb3acSXiantao Zhang 		struct pid *oldpid;
4143b12ce36aSBen Gardon 		r = -EINVAL;
41443e4bb3acSXiantao Zhang 		if (arg)
41453e4bb3acSXiantao Zhang 			goto out;
41463e4bb3acSXiantao Zhang 		oldpid = rcu_access_pointer(vcpu->pid);
41470fce5623SAvi Kivity 		if (unlikely(oldpid != task_pid(current))) {
41483e4bb3acSXiantao Zhang 			/* The thread running this VCPU changed. */
41490fce5623SAvi Kivity 			struct pid *newpid;
41503e4bb3acSXiantao Zhang 
41513e4bb3acSXiantao Zhang 			r = kvm_arch_vcpu_run_pid_change(vcpu);
41520fce5623SAvi Kivity 			if (r)
41533e4bb3acSXiantao Zhang 				break;
41543e4bb3acSXiantao Zhang 
41550fce5623SAvi Kivity 			newpid = get_task_pid(current, PIDTYPE_PID);
41560fce5623SAvi Kivity 			rcu_assign_pointer(vcpu->pid, newpid);
41570fce5623SAvi Kivity 			if (oldpid)
41583e4bb3acSXiantao Zhang 				synchronize_rcu();
41590fce5623SAvi Kivity 			put_pid(oldpid);
4160ff5c2c03SSasha Levin 		}
4161ff5c2c03SSasha Levin 		r = kvm_arch_vcpu_ioctl_run(vcpu);
4162ff5c2c03SSasha Levin 		trace_kvm_userspace_exit(vcpu->run->exit_reason, r);
41633e4bb3acSXiantao Zhang 		break;
4164ff5c2c03SSasha Levin 	}
41653e4bb3acSXiantao Zhang 	case KVM_GET_REGS: {
41663e4bb3acSXiantao Zhang 		struct kvm_regs *kvm_regs;
41670fce5623SAvi Kivity 
41680fce5623SAvi Kivity 		r = -ENOMEM;
41690fce5623SAvi Kivity 		kvm_regs = kzalloc(sizeof(struct kvm_regs), GFP_KERNEL_ACCOUNT);
4170b12ce36aSBen Gardon 		if (!kvm_regs)
4171b12ce36aSBen Gardon 			goto out;
4172fa3795a7SDave Hansen 		r = kvm_arch_vcpu_ioctl_get_regs(vcpu, kvm_regs);
4173fa3795a7SDave Hansen 		if (r)
4174fa3795a7SDave Hansen 			goto out_free1;
4175fa3795a7SDave Hansen 		r = -EFAULT;
41760fce5623SAvi Kivity 		if (copy_to_user(argp, kvm_regs, sizeof(struct kvm_regs)))
41770fce5623SAvi Kivity 			goto out_free1;
41780fce5623SAvi Kivity 		r = 0;
4179fa3795a7SDave Hansen out_free1:
41800fce5623SAvi Kivity 		kfree(kvm_regs);
41810fce5623SAvi Kivity 		break;
41820fce5623SAvi Kivity 	}
41830fce5623SAvi Kivity 	case KVM_SET_REGS: {
41840fce5623SAvi Kivity 		struct kvm_regs *kvm_regs;
4185ff5c2c03SSasha Levin 
4186ff5c2c03SSasha Levin 		kvm_regs = memdup_user(argp, sizeof(*kvm_regs));
4187ff5c2c03SSasha Levin 		if (IS_ERR(kvm_regs)) {
418818595411SGuo Chao 			r = PTR_ERR(kvm_regs);
41890fce5623SAvi Kivity 			goto out;
4190ff5c2c03SSasha Levin 		}
4191fa3795a7SDave Hansen 		r = kvm_arch_vcpu_ioctl_set_regs(vcpu, kvm_regs);
41920fce5623SAvi Kivity 		kfree(kvm_regs);
41930fce5623SAvi Kivity 		break;
419462d9f0dbSMarcelo Tosatti 	}
419562d9f0dbSMarcelo Tosatti 	case KVM_GET_SREGS: {
419662d9f0dbSMarcelo Tosatti 		kvm_sregs = kzalloc(sizeof(struct kvm_sregs),
419762d9f0dbSMarcelo Tosatti 				    GFP_KERNEL_ACCOUNT);
419862d9f0dbSMarcelo Tosatti 		r = -ENOMEM;
419962d9f0dbSMarcelo Tosatti 		if (!kvm_sregs)
420062d9f0dbSMarcelo Tosatti 			goto out;
4201893bdbf1SXiubo Li 		r = kvm_arch_vcpu_ioctl_get_sregs(vcpu, kvm_sregs);
420262d9f0dbSMarcelo Tosatti 		if (r)
420362d9f0dbSMarcelo Tosatti 			goto out;
420462d9f0dbSMarcelo Tosatti 		r = -EFAULT;
420562d9f0dbSMarcelo Tosatti 		if (copy_to_user(argp, kvm_sregs, sizeof(struct kvm_sregs)))
420662d9f0dbSMarcelo Tosatti 			goto out;
420762d9f0dbSMarcelo Tosatti 		r = 0;
420862d9f0dbSMarcelo Tosatti 		break;
420962d9f0dbSMarcelo Tosatti 	}
4210893bdbf1SXiubo Li 	case KVM_SET_SREGS: {
421162d9f0dbSMarcelo Tosatti 		kvm_sregs = memdup_user(argp, sizeof(*kvm_sregs));
421262d9f0dbSMarcelo Tosatti 		if (IS_ERR(kvm_sregs)) {
421362d9f0dbSMarcelo Tosatti 			r = PTR_ERR(kvm_sregs);
421462d9f0dbSMarcelo Tosatti 			kvm_sregs = NULL;
42150fce5623SAvi Kivity 			goto out;
42160fce5623SAvi Kivity 		}
42170fce5623SAvi Kivity 		r = kvm_arch_vcpu_ioctl_set_sregs(vcpu, kvm_sregs);
42180fce5623SAvi Kivity 		break;
4219893bdbf1SXiubo Li 	}
42200fce5623SAvi Kivity 	case KVM_GET_MP_STATE: {
42210fce5623SAvi Kivity 		struct kvm_mp_state mp_state;
42220fce5623SAvi Kivity 
42230fce5623SAvi Kivity 		r = kvm_arch_vcpu_ioctl_get_mpstate(vcpu, &mp_state);
42240fce5623SAvi Kivity 		if (r)
4225893bdbf1SXiubo Li 			goto out;
42260fce5623SAvi Kivity 		r = -EFAULT;
42270fce5623SAvi Kivity 		if (copy_to_user(argp, &mp_state, sizeof(mp_state)))
42280fce5623SAvi Kivity 			goto out;
42290fce5623SAvi Kivity 		r = 0;
4230d0bfb940SJan Kiszka 		break;
4231d0bfb940SJan Kiszka 	}
42320fce5623SAvi Kivity 	case KVM_SET_MP_STATE: {
42330fce5623SAvi Kivity 		struct kvm_mp_state mp_state;
4234893bdbf1SXiubo Li 
42350fce5623SAvi Kivity 		r = -EFAULT;
4236d0bfb940SJan Kiszka 		if (copy_from_user(&mp_state, argp, sizeof(mp_state)))
42370fce5623SAvi Kivity 			goto out;
42380fce5623SAvi Kivity 		r = kvm_arch_vcpu_ioctl_set_mpstate(vcpu, &mp_state);
42390fce5623SAvi Kivity 		break;
42400fce5623SAvi Kivity 	}
42410fce5623SAvi Kivity 	case KVM_TRANSLATE: {
42420fce5623SAvi Kivity 		struct kvm_translation tr;
42430fce5623SAvi Kivity 
42440fce5623SAvi Kivity 		r = -EFAULT;
42450fce5623SAvi Kivity 		if (copy_from_user(&tr, argp, sizeof(tr)))
42460fce5623SAvi Kivity 			goto out;
42470fce5623SAvi Kivity 		r = kvm_arch_vcpu_ioctl_translate(vcpu, &tr);
4248893bdbf1SXiubo Li 		if (r)
42490fce5623SAvi Kivity 			goto out;
42500fce5623SAvi Kivity 		r = -EFAULT;
4251893bdbf1SXiubo Li 		if (copy_to_user(argp, &tr, sizeof(tr)))
42520fce5623SAvi Kivity 			goto out;
42530fce5623SAvi Kivity 		r = 0;
42540fce5623SAvi Kivity 		break;
4255893bdbf1SXiubo Li 	}
42560fce5623SAvi Kivity 	case KVM_SET_GUEST_DEBUG: {
42570fce5623SAvi Kivity 		struct kvm_guest_debug dbg;
42580fce5623SAvi Kivity 
4259376d41ffSAndi Kleen 		r = -EFAULT;
42600fce5623SAvi Kivity 		if (copy_from_user(&dbg, argp, sizeof(dbg)))
42610fce5623SAvi Kivity 			goto out;
42620fce5623SAvi Kivity 		r = kvm_arch_vcpu_ioctl_set_guest_debug(vcpu, &dbg);
4263b12ce36aSBen Gardon 		break;
4264fa3795a7SDave Hansen 	}
4265fa3795a7SDave Hansen 	case KVM_SET_SIGNAL_MASK: {
4266fa3795a7SDave Hansen 		struct kvm_signal_mask __user *sigmask_arg = argp;
4267fa3795a7SDave Hansen 		struct kvm_signal_mask kvm_sigmask;
42680fce5623SAvi Kivity 		sigset_t sigset, *p;
42690fce5623SAvi Kivity 
42700fce5623SAvi Kivity 		p = NULL;
4271fa3795a7SDave Hansen 		if (argp) {
42720fce5623SAvi Kivity 			r = -EFAULT;
42730fce5623SAvi Kivity 			if (copy_from_user(&kvm_sigmask, argp,
42740fce5623SAvi Kivity 					   sizeof(kvm_sigmask)))
42750fce5623SAvi Kivity 				goto out;
42760fce5623SAvi Kivity 			r = -EINVAL;
4277ff5c2c03SSasha Levin 			if (kvm_sigmask.len != sizeof(sigset))
4278ff5c2c03SSasha Levin 				goto out;
4279ff5c2c03SSasha Levin 			r = -EFAULT;
428018595411SGuo Chao 			if (copy_from_user(&sigset, sigmask_arg->sigset,
42810fce5623SAvi Kivity 					   sizeof(sigset)))
4282ff5c2c03SSasha Levin 				goto out;
4283fa3795a7SDave Hansen 			p = &sigset;
42840fce5623SAvi Kivity 		}
42850fce5623SAvi Kivity 		r = kvm_vcpu_ioctl_set_sigmask(vcpu, p);
4286ce55c049SJing Zhang 		break;
4287ce55c049SJing Zhang 	}
4288ce55c049SJing Zhang 	case KVM_GET_FPU: {
4289ce55c049SJing Zhang 		fpu = kzalloc(sizeof(struct kvm_fpu), GFP_KERNEL_ACCOUNT);
42900fce5623SAvi Kivity 		r = -ENOMEM;
42910fce5623SAvi Kivity 		if (!fpu)
42920fce5623SAvi Kivity 			goto out;
42930fce5623SAvi Kivity 		r = kvm_arch_vcpu_ioctl_get_fpu(vcpu, fpu);
4294ec7660ccSChristoffer Dall 		if (r)
4295fa3795a7SDave Hansen 			goto out;
4296fa3795a7SDave Hansen 		r = -EFAULT;
42970fce5623SAvi Kivity 		if (copy_to_user(argp, fpu, sizeof(struct kvm_fpu)))
42980fce5623SAvi Kivity 			goto out;
42990fce5623SAvi Kivity 		r = 0;
4300de8e5d74SChristian Borntraeger 		break;
43011dda606cSAlexander Graf 	}
43021dda606cSAlexander Graf 	case KVM_SET_FPU: {
43031dda606cSAlexander Graf 		fpu = memdup_user(argp, sizeof(*fpu));
43041dda606cSAlexander Graf 		if (IS_ERR(fpu)) {
43051dda606cSAlexander Graf 			r = PTR_ERR(fpu);
43061dda606cSAlexander Graf 			fpu = NULL;
43071dda606cSAlexander Graf 			goto out;
4308f4d31653SPaolo Bonzini 		}
43091dda606cSAlexander Graf 		r = kvm_arch_vcpu_ioctl_set_fpu(vcpu, fpu);
43101dda606cSAlexander Graf 		break;
43111dda606cSAlexander Graf 	}
43121dda606cSAlexander Graf 	case KVM_GET_STATS_FD: {
43131dda606cSAlexander Graf 		r = kvm_vcpu_ioctl_get_stats_fd(vcpu);
43141dda606cSAlexander Graf 		break;
43151dda606cSAlexander Graf 	}
43161dda606cSAlexander Graf 	default:
43171dda606cSAlexander Graf 		r = kvm_arch_vcpu_ioctl(filp, ioctl, arg);
43181dda606cSAlexander Graf 	}
43191dda606cSAlexander Graf out:
4320893bdbf1SXiubo Li 	mutex_unlock(&vcpu->mutex);
43211dda606cSAlexander Graf 	kfree(fpu);
43221dda606cSAlexander Graf 	kfree(kvm_sregs);
43233968cf62SAl Viro 	return r;
43241dda606cSAlexander Graf }
43251dda606cSAlexander Graf 
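/*
 * Illustrative userspace sketch (not part of this file) of the most common
 * vCPU ioctls dispatched above; struct kvm_regs is architecture specific
 * (x86 register names shown), guest_entry is a hypothetical entry point, and
 * error handling is omitted.
 *
 *	struct kvm_regs regs;
 *
 *	ioctl(vcpu_fd, KVM_GET_REGS, &regs);
 *	regs.rip = guest_entry;
 *	ioctl(vcpu_fd, KVM_SET_REGS, &regs);
 *
 *	ioctl(vcpu_fd, KVM_RUN, 0);
 *	// then inspect run->exit_reason in the mmap()ed kvm_run area
 */
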
43261393b4aaSPaolo Bonzini #ifdef CONFIG_KVM_COMPAT
43271393b4aaSPaolo Bonzini static long kvm_vcpu_compat_ioctl(struct file *filp,
43281dda606cSAlexander Graf 				  unsigned int ioctl, unsigned long arg)
43291dda606cSAlexander Graf {
4330760a9a30SAlan Cox 	struct kvm_vcpu *vcpu = filp->private_data;
4331760a9a30SAlan Cox 	void __user *argp = compat_ptr(arg);
43321dda606cSAlexander Graf 	int r;
43331dda606cSAlexander Graf 
43341dda606cSAlexander Graf 	if (vcpu->kvm->mm != current->mm || vcpu->kvm->vm_dead)
43351dda606cSAlexander Graf 		return -EIO;
43361dda606cSAlexander Graf 
43371dda606cSAlexander Graf 	switch (ioctl) {
43381dda606cSAlexander Graf 	case KVM_SET_SIGNAL_MASK: {
43391dda606cSAlexander Graf 		struct kvm_signal_mask __user *sigmask_arg = argp;
43401dda606cSAlexander Graf 		struct kvm_signal_mask kvm_sigmask;
43411dda606cSAlexander Graf 		sigset_t sigset;
43421dda606cSAlexander Graf 
4343a1cd3f08SCédric Le Goater 		if (argp) {
4344a1cd3f08SCédric Le Goater 			r = -EFAULT;
4345a1cd3f08SCédric Le Goater 			if (copy_from_user(&kvm_sigmask, argp,
4346a1cd3f08SCédric Le Goater 					   sizeof(kvm_sigmask)))
4347a1cd3f08SCédric Le Goater 				goto out;
4348a1cd3f08SCédric Le Goater 			r = -EINVAL;
4349a1cd3f08SCédric Le Goater 			if (kvm_sigmask.len != sizeof(compat_sigset_t))
4350a1cd3f08SCédric Le Goater 				goto out;
4351a1cd3f08SCédric Le Goater 			r = -EFAULT;
4352a1cd3f08SCédric Le Goater 			if (get_compat_sigset(&sigset,
4353852b6d57SScott Wood 					      (compat_sigset_t __user *)sigmask_arg->sigset))
4354852b6d57SScott Wood 				goto out;
4355852b6d57SScott Wood 			r = kvm_vcpu_ioctl_set_sigmask(vcpu, &sigset);
4356852b6d57SScott Wood 		} else
4357852b6d57SScott Wood 			r = kvm_vcpu_ioctl_set_sigmask(vcpu, NULL);
4358852b6d57SScott Wood 		break;
4359852b6d57SScott Wood 	}
4360852b6d57SScott Wood 	default:
4361852b6d57SScott Wood 		r = kvm_vcpu_ioctl(filp, ioctl, arg);
4362852b6d57SScott Wood 	}
4363852b6d57SScott Wood 
4364852b6d57SScott Wood out:
4365852b6d57SScott Wood 	return r;
4366852b6d57SScott Wood }
4367852b6d57SScott Wood #endif
4368852b6d57SScott Wood 
4369852b6d57SScott Wood static int kvm_device_mmap(struct file *filp, struct vm_area_struct *vma)
4370852b6d57SScott Wood {
4371852b6d57SScott Wood 	struct kvm_device *dev = filp->private_data;
4372852b6d57SScott Wood 
4373852b6d57SScott Wood 	if (dev->ops->mmap)
4374f4d31653SPaolo Bonzini 		return dev->ops->mmap(dev, vma);
4375ddba9180SSean Christopherson 
4376ddba9180SSean Christopherson 	return -ENODEV;
4377852b6d57SScott Wood }
4378852b6d57SScott Wood 
4379852b6d57SScott Wood static int kvm_device_ioctl_attr(struct kvm_device *dev,
4380852b6d57SScott Wood 				 int (*accessor)(struct kvm_device *dev,
4381852b6d57SScott Wood 						 struct kvm_device_attr *attr),
4382852b6d57SScott Wood 				 unsigned long arg)
4383852b6d57SScott Wood {
4384852b6d57SScott Wood 	struct kvm_device_attr attr;
4385852b6d57SScott Wood 
4386852b6d57SScott Wood 	if (!accessor)
4387852b6d57SScott Wood 		return -EPERM;
4388852b6d57SScott Wood 
4389852b6d57SScott Wood 	if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
4390852b6d57SScott Wood 		return -EFAULT;
4391852b6d57SScott Wood 
4392852b6d57SScott Wood 	return accessor(dev, &attr);
4393852b6d57SScott Wood }
4394852b6d57SScott Wood 
4395852b6d57SScott Wood static long kvm_device_ioctl(struct file *filp, unsigned int ioctl,
4396852b6d57SScott Wood 			     unsigned long arg)
43972bde9b3eSCédric Le Goater {
43982bde9b3eSCédric Le Goater 	struct kvm_device *dev = filp->private_data;
43992bde9b3eSCédric Le Goater 
44002bde9b3eSCédric Le Goater 	if (dev->kvm->mm != current->mm || dev->kvm->vm_dead)
44012bde9b3eSCédric Le Goater 		return -EIO;
44022bde9b3eSCédric Le Goater 
44032bde9b3eSCédric Le Goater 	switch (ioctl) {
4404852b6d57SScott Wood 	case KVM_SET_DEVICE_ATTR:
4405852b6d57SScott Wood 		return kvm_device_ioctl_attr(dev, dev->ops->set_attr, arg);
4406852b6d57SScott Wood 	case KVM_GET_DEVICE_ATTR:
4407852b6d57SScott Wood 		return kvm_device_ioctl_attr(dev, dev->ops->get_attr, arg);
4408852b6d57SScott Wood 	case KVM_HAS_DEVICE_ATTR:
4409852b6d57SScott Wood 		return kvm_device_ioctl_attr(dev, dev->ops->has_attr, arg);
4410852b6d57SScott Wood 	default:
44117ddfd3e0SMarc Zyngier 		if (dev->ops->ioctl)
4412a1cd3f08SCédric Le Goater 			return dev->ops->ioctl(dev, ioctl, arg);
4413852b6d57SScott Wood 
4414852b6d57SScott Wood 		return -ENOTTY;
4415852b6d57SScott Wood 	}
4416852b6d57SScott Wood }
4417852b6d57SScott Wood 
4418852b6d57SScott Wood static int kvm_device_release(struct inode *inode, struct file *filp)
4419852b6d57SScott Wood {
4420852b6d57SScott Wood 	struct kvm_device *dev = filp->private_data;
4421852b6d57SScott Wood 	struct kvm *kvm = dev->kvm;
4422852b6d57SScott Wood 
44238538cb22SSteven Price 	if (dev->ops->release) {
4424d60eacb0SWill Deacon 		mutex_lock(&kvm->lock);
4425d60eacb0SWill Deacon 		list_del(&dev->vm_node);
4426d60eacb0SWill Deacon 		dev->ops->release(dev);
4427d60eacb0SWill Deacon 		mutex_unlock(&kvm->lock);
4428d60eacb0SWill Deacon 	}
4429d60eacb0SWill Deacon 
44308538cb22SSteven Price 	kvm_put_kvm(kvm);
4431d60eacb0SWill Deacon 	return 0;
4432d60eacb0SWill Deacon }
4433d60eacb0SWill Deacon 
4434d60eacb0SWill Deacon static const struct file_operations kvm_device_fops = {
4435d60eacb0SWill Deacon 	.unlocked_ioctl = kvm_device_ioctl,
4436d60eacb0SWill Deacon 	.release = kvm_device_release,
4437d60eacb0SWill Deacon 	KVM_COMPAT(kvm_device_ioctl),
4438d60eacb0SWill Deacon 	.mmap = kvm_device_mmap,
4439d60eacb0SWill Deacon };
4440d60eacb0SWill Deacon 
4441d60eacb0SWill Deacon struct kvm_device *kvm_device_from_filp(struct file *filp)
4442571ee1b6SWanpeng Li {
4443571ee1b6SWanpeng Li 	if (filp->f_op != &kvm_device_fops)
4444571ee1b6SWanpeng Li 		return NULL;
4445571ee1b6SWanpeng Li 
4446571ee1b6SWanpeng Li 	return filp->private_data;
4447571ee1b6SWanpeng Li }
4448852b6d57SScott Wood 
4449852b6d57SScott Wood static const struct kvm_device_ops *kvm_device_ops_table[KVM_DEV_TYPE_MAX] = {
4450852b6d57SScott Wood #ifdef CONFIG_KVM_MPIC
4451eceb6e1dSLi kunyu 	[KVM_DEV_TYPE_FSL_MPIC_20]	= &kvm_mpic_ops,
4452852b6d57SScott Wood 	[KVM_DEV_TYPE_FSL_MPIC_42]	= &kvm_mpic_ops,
4453852b6d57SScott Wood #endif
44541d487e9bSPaolo Bonzini };
4455852b6d57SScott Wood 
4456852b6d57SScott Wood int kvm_register_device_ops(const struct kvm_device_ops *ops, u32 type)
4457d60eacb0SWill Deacon {
4458852b6d57SScott Wood 	if (type >= ARRAY_SIZE(kvm_device_ops_table))
4459d60eacb0SWill Deacon 		return -ENOSPC;
44601d487e9bSPaolo Bonzini 
44611d487e9bSPaolo Bonzini 	if (kvm_device_ops_table[type] != NULL)
4462d60eacb0SWill Deacon 		return -EEXIST;
4463d60eacb0SWill Deacon 
4464852b6d57SScott Wood 	kvm_device_ops_table[type] = ops;
4465852b6d57SScott Wood 	return 0;
4466852b6d57SScott Wood }
4467852b6d57SScott Wood 
4468b12ce36aSBen Gardon void kvm_unregister_device_ops(u32 type)
4469852b6d57SScott Wood {
4470852b6d57SScott Wood 	if (kvm_device_ops_table[type] != NULL)
4471852b6d57SScott Wood 		kvm_device_ops_table[type] = NULL;
4472852b6d57SScott Wood }
4473852b6d57SScott Wood 
4474852b6d57SScott Wood static int kvm_ioctl_create_device(struct kvm *kvm,
4475a28ebea2SChristoffer Dall 				   struct kvm_create_device *cd)
44761d487e9bSPaolo Bonzini {
4477852b6d57SScott Wood 	const struct kvm_device_ops *ops;
4478a28ebea2SChristoffer Dall 	struct kvm_device *dev;
4479852b6d57SScott Wood 	bool test = cd->flags & KVM_CREATE_DEVICE_TEST;
4480852b6d57SScott Wood 	int type;
4481852b6d57SScott Wood 	int ret;
4482a28ebea2SChristoffer Dall 
4483a28ebea2SChristoffer Dall 	if (cd->type >= ARRAY_SIZE(kvm_device_ops_table))
4484852b6d57SScott Wood 		return -ENODEV;
4485023e9fddSChristoffer Dall 
4486023e9fddSChristoffer Dall 	type = array_index_nospec(cd->type, ARRAY_SIZE(kvm_device_ops_table));
4487023e9fddSChristoffer Dall 	ops = kvm_device_ops_table[type];
4488cfa39381SJann Horn 	if (ops == NULL)
448924009b05SYann Droneaud 		return -ENODEV;
4490852b6d57SScott Wood 
4491149487bdSSean Christopherson 	if (test)
4492a28ebea2SChristoffer Dall 		return 0;
4493a28ebea2SChristoffer Dall 
4494e8bc2427SAlexey Kardashevskiy 	dev = kzalloc(sizeof(*dev), GFP_KERNEL_ACCOUNT);
4495e8bc2427SAlexey Kardashevskiy 	if (!dev)
4496a28ebea2SChristoffer Dall 		return -ENOMEM;
4497e8bc2427SAlexey Kardashevskiy 
4498a0f1d21cSDan Carpenter 	dev->ops = ops;
4499852b6d57SScott Wood 	dev->kvm = kvm;
4500852b6d57SScott Wood 
4501852b6d57SScott Wood 	mutex_lock(&kvm->lock);
4502852b6d57SScott Wood 	ret = ops->create(dev, type);
4503852b6d57SScott Wood 	if (ret < 0) {
4504852b6d57SScott Wood 		mutex_unlock(&kvm->lock);
4505852b6d57SScott Wood 		kfree(dev);
4506f15ba52bSThomas Huth 		return ret;
450792b591a4SAlexander Graf 	}
450892b591a4SAlexander Graf 	list_add(&dev->vm_node, &kvm->devices);
450992b591a4SAlexander Graf 	mutex_unlock(&kvm->lock);
451092b591a4SAlexander Graf 
451192b591a4SAlexander Graf 	if (ops->init)
451292b591a4SAlexander Graf 		ops->init(dev);
451392b591a4SAlexander Graf 
451492b591a4SAlexander Graf 	kvm_get_kvm(kvm);
451592b591a4SAlexander Graf 	ret = anon_inode_getfd(ops->name, &kvm_device_fops, dev, O_RDWR | O_CLOEXEC);
4516297e2105SPaul Mackerras 	if (ret < 0) {
4517dc9be0faSPaolo Bonzini 		kvm_put_kvm_no_destroy(kvm);
451892b591a4SAlexander Graf 		mutex_lock(&kvm->lock);
4519e9ea5069SJason Wang 		list_del(&dev->vm_node);
452092b591a4SAlexander Graf 		if (ops->release)
4521e5d83c74SPaolo Bonzini 			ops->release(dev);
4522acd05785SDavid Matlack 		mutex_unlock(&kvm->lock);
452392b591a4SAlexander Graf 		if (ops->destroy)
45244b4357e0SPaolo Bonzini 			ops->destroy(dev);
452530422558SPaolo Bonzini 		return ret;
452630422558SPaolo Bonzini 	}
45270804c849SPeng Hao 
45280804c849SPeng Hao 	cd->fd = ret;
452930422558SPaolo Bonzini 	return 0;
45303c9bd400SJay Zhou }
45313c9bd400SJay Zhou 
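/*
 * Illustrative userspace sketch (not part of this file): KVM_CREATE_DEVICE
 * takes a struct kvm_create_device and returns the new device fd in cd.fd;
 * setting KVM_CREATE_DEVICE_TEST only probes whether the device type is
 * supported.  Device state is then accessed through the attribute ioctls
 * dispatched by kvm_device_ioctl_attr() above.  The device type and the
 * group/attr values below are placeholders; error handling is omitted.
 *
 *	__u64 val = 0;
 *	struct kvm_create_device cd = { .type = KVM_DEV_TYPE_FSL_MPIC_20 };
 *	struct kvm_device_attr attr = {
 *		.group	= 0,			// device specific
 *		.attr	= 0,			// device specific
 *		.addr	= (__u64)(unsigned long)&val,
 *	};
 *
 *	ioctl(vm_fd, KVM_CREATE_DEVICE, &cd);
 *	ioctl(cd.fd, KVM_SET_DEVICE_ATTR, &attr);
 */
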
45323c9bd400SJay Zhou static int kvm_vm_ioctl_check_extension_generic(struct kvm *kvm, long arg)
45333c9bd400SJay Zhou {
453492b591a4SAlexander Graf 	switch (arg) {
453592b591a4SAlexander Graf 	case KVM_CAP_USER_MEMORY:
453692b591a4SAlexander Graf 	case KVM_CAP_DESTROY_MEMORY_REGION_WORKS:
453792b591a4SAlexander Graf 	case KVM_CAP_JOIN_MEMORY_REGIONS_WORKS:
4538f481b069SPaolo Bonzini 	case KVM_CAP_INTERNAL_ERROR_DATA:
4539f481b069SPaolo Bonzini #ifdef CONFIG_HAVE_KVM_MSI
4540f481b069SPaolo Bonzini 	case KVM_CAP_SIGNAL_MSI:
4541f481b069SPaolo Bonzini #endif
4542c110ae57SPaolo Bonzini #ifdef CONFIG_HAVE_KVM_IRQFD
4543c110ae57SPaolo Bonzini 	case KVM_CAP_IRQFD:
4544fb04a1edSPeter Xu #endif
454517601bfeSMarc Zyngier 	case KVM_CAP_IOEVENTFD_ANY_LENGTH:
454617601bfeSMarc Zyngier 	case KVM_CAP_CHECK_EXTENSION_VM:
454717601bfeSMarc Zyngier 	case KVM_CAP_ENABLE_CAP_VM:
454817601bfeSMarc Zyngier 	case KVM_CAP_HALT_POLL:
454917601bfeSMarc Zyngier 		return 1;
455017601bfeSMarc Zyngier #ifdef CONFIG_KVM_MMIO
455117601bfeSMarc Zyngier 	case KVM_CAP_COALESCED_MMIO:
4552fb04a1edSPeter Xu 		return KVM_COALESCED_MMIO_PAGE_OFFSET;
4553fb04a1edSPeter Xu 	case KVM_CAP_COALESCED_PIO:
4554fb04a1edSPeter Xu 		return 1;
4555fb04a1edSPeter Xu #endif
455686bdf3ebSGavin Shan #ifdef CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT
455786bdf3ebSGavin Shan 	case KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2:
455886bdf3ebSGavin Shan 		return KVM_DIRTY_LOG_MANUAL_CAPS;
4559ce55c049SJing Zhang #endif
4560d495f942SPaolo Bonzini #ifdef CONFIG_HAVE_KVM_IRQ_ROUTING
4561ce55c049SJing Zhang 	case KVM_CAP_IRQ_ROUTING:
456292b591a4SAlexander Graf 		return KVM_MAX_IRQ_ROUTES;
456392b591a4SAlexander Graf #endif
456492b591a4SAlexander Graf #if KVM_ADDRESS_SPACE_NUM > 1
456592b591a4SAlexander Graf 	case KVM_CAP_MULTI_ADDRESS_SPACE:
456692b591a4SAlexander Graf 		return KVM_ADDRESS_SPACE_NUM;
456792b591a4SAlexander Graf #endif
4568fb04a1edSPeter Xu 	case KVM_CAP_NR_MEMSLOTS:
4569fb04a1edSPeter Xu 		return KVM_USER_MEM_SLOTS;
4570fb04a1edSPeter Xu 	case KVM_CAP_DIRTY_LOG_RING:
4571fb04a1edSPeter Xu #ifdef CONFIG_HAVE_KVM_DIRTY_RING_TSO
4572fb04a1edSPeter Xu 		return KVM_DIRTY_RING_MAX_ENTRIES * sizeof(struct kvm_dirty_gfn);
4573fb04a1edSPeter Xu #else
4574fb04a1edSPeter Xu 		return 0;
4575fb04a1edSPeter Xu #endif
4576fb04a1edSPeter Xu 	case KVM_CAP_DIRTY_LOG_RING_ACQ_REL:
4577fb04a1edSPeter Xu #ifdef CONFIG_HAVE_KVM_DIRTY_RING_ACQ_REL
4578fb04a1edSPeter Xu 		return KVM_DIRTY_RING_MAX_ENTRIES * sizeof(struct kvm_dirty_gfn);
4579fb04a1edSPeter Xu #else
4580fb04a1edSPeter Xu 		return 0;
4581fb04a1edSPeter Xu #endif
4582fb04a1edSPeter Xu #ifdef CONFIG_NEED_KVM_DIRTY_RING_WITH_BITMAP
4583fb04a1edSPeter Xu 	case KVM_CAP_DIRTY_LOG_RING_WITH_BITMAP:
4584fb04a1edSPeter Xu #endif
4585fb04a1edSPeter Xu 	case KVM_CAP_BINARY_STATS_FD:
4586fb04a1edSPeter Xu 	case KVM_CAP_SYSTEM_EVENT_DATA:
4587fb04a1edSPeter Xu 		return 1;
4588fb04a1edSPeter Xu 	default:
4589fb04a1edSPeter Xu 		break;
4590fb04a1edSPeter Xu 	}
4591fb04a1edSPeter Xu 	return kvm_vm_ioctl_check_extension(kvm, arg);
4592fb04a1edSPeter Xu }
4593fb04a1edSPeter Xu 
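/*
 * Illustrative userspace sketch (not part of this file): KVM_CHECK_EXTENSION
 * may be issued on the VM fd (KVM_CAP_CHECK_EXTENSION_VM) and returns 0 for
 * unsupported capabilities or a capability-specific positive value, e.g. the
 * maximum dirty ring size in bytes as computed above.  Error handling is
 * omitted.
 *
 *	int ring_bytes = ioctl(vm_fd, KVM_CHECK_EXTENSION, KVM_CAP_DIRTY_LOG_RING);
 *	int nr_slots   = ioctl(vm_fd, KVM_CHECK_EXTENSION, KVM_CAP_NR_MEMSLOTS);
 */
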
4594fb04a1edSPeter Xu static int kvm_vm_ioctl_enable_dirty_log_ring(struct kvm *kvm, u32 size)
4595fb04a1edSPeter Xu {
4596fb04a1edSPeter Xu 	int r;
4597fb04a1edSPeter Xu 
4598fb04a1edSPeter Xu 	if (!KVM_DIRTY_LOG_PAGE_OFFSET)
4599fb04a1edSPeter Xu 		return -EINVAL;
4600fb04a1edSPeter Xu 
4601fb04a1edSPeter Xu 	/* The size must be a power of 2 */
4602fb04a1edSPeter Xu 	if (!size || (size & (size - 1)))
4603fb04a1edSPeter Xu 		return -EINVAL;
4604fb04a1edSPeter Xu 
4605fb04a1edSPeter Xu 	/* Must be large enough for the reserved entries, and at least a page */
4606fb04a1edSPeter Xu 	if (size < kvm_dirty_ring_get_rsvd_entries() *
4607fb04a1edSPeter Xu 	    sizeof(struct kvm_dirty_gfn) || size < PAGE_SIZE)
460846808a4cSMarc Zyngier 		return -EINVAL;
4609fb04a1edSPeter Xu 
4610fb04a1edSPeter Xu 	if (size > KVM_DIRTY_RING_MAX_ENTRIES *
4611fb04a1edSPeter Xu 	    sizeof(struct kvm_dirty_gfn))
4612fb04a1edSPeter Xu 		return -E2BIG;
4613fb04a1edSPeter Xu 
4614fb04a1edSPeter Xu 	/* We only allow it to be set once */
4615fb04a1edSPeter Xu 	if (kvm->dirty_ring_size)
4616fb04a1edSPeter Xu 		return -EINVAL;
4617fb04a1edSPeter Xu 
4618fb04a1edSPeter Xu 	mutex_lock(&kvm->lock);
4619fb04a1edSPeter Xu 
4620fb04a1edSPeter Xu 	if (kvm->created_vcpus) {
4621fb04a1edSPeter Xu 		/* We don't allow changing this value after vCPUs are created */
4622fb04a1edSPeter Xu 		r = -EINVAL;
4623fb04a1edSPeter Xu 	} else {
4624fb04a1edSPeter Xu 		kvm->dirty_ring_size = size;
4625fb04a1edSPeter Xu 		r = 0;
4626fb04a1edSPeter Xu 	}
4627fb04a1edSPeter Xu 
4628e5d83c74SPaolo Bonzini 	mutex_unlock(&kvm->lock);
4629e5d83c74SPaolo Bonzini 	return r;
4630e5d83c74SPaolo Bonzini }
4631e5d83c74SPaolo Bonzini 
4632e5d83c74SPaolo Bonzini static int kvm_vm_ioctl_reset_dirty_pages(struct kvm *kvm)
4633e5d83c74SPaolo Bonzini {
463426f45714SRicardo Koller 	unsigned long i;
463586bdf3ebSGavin Shan 	struct kvm_vcpu *vcpu;
463686bdf3ebSGavin Shan 	int cleared = 0;
463786bdf3ebSGavin Shan 
463886bdf3ebSGavin Shan 	if (!kvm->dirty_ring_size)
463986bdf3ebSGavin Shan 		return -EINVAL;
464086bdf3ebSGavin Shan 
464186bdf3ebSGavin Shan 	mutex_lock(&kvm->slots_lock);
464286bdf3ebSGavin Shan 
464386bdf3ebSGavin Shan 	kvm_for_each_vcpu(i, vcpu, kvm)
464486bdf3ebSGavin Shan 		cleared += kvm_dirty_ring_reset(vcpu->kvm, &vcpu->dirty_ring);
464586bdf3ebSGavin Shan 
464686bdf3ebSGavin Shan 	mutex_unlock(&kvm->slots_lock);
464726f45714SRicardo Koller 
464886bdf3ebSGavin Shan 	if (cleared)
4649e5d83c74SPaolo Bonzini 		kvm_flush_remote_tlbs(kvm);
4650e5d83c74SPaolo Bonzini 
4651e5d83c74SPaolo Bonzini 	return cleared;
4652e5d83c74SPaolo Bonzini }
46532a31b9dbSPaolo Bonzini 
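/*
 * Illustrative userspace sketch (not part of this file) of the dirty ring
 * flow implemented above: the ring size (in bytes, a power of two) is set
 * once per VM before any vCPU exists, each vCPU's ring is then mmap()ed at
 * KVM_DIRTY_LOG_PAGE_OFFSET pages into the vCPU fd, and harvested entries
 * are handed back with KVM_RESET_DIRTY_RINGS.  page_size stands for the
 * system page size (e.g. sysconf(_SC_PAGESIZE)); error handling is omitted.
 *
 *	struct kvm_enable_cap cap = {
 *		.cap	 = KVM_CAP_DIRTY_LOG_RING,
 *		.args[0] = 65536,		// ring size in bytes, example
 *	};
 *	ioctl(vm_fd, KVM_ENABLE_CAP, &cap);
 *
 *	struct kvm_dirty_gfn *ring =
 *		mmap(NULL, 65536, PROT_READ | PROT_WRITE, MAP_SHARED, vcpu_fd,
 *		     KVM_DIRTY_LOG_PAGE_OFFSET * page_size);
 *
 *	// ... collect dirty gfns from the ring, then:
 *	ioctl(vm_fd, KVM_RESET_DIRTY_RINGS, 0);
 */
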
46542a31b9dbSPaolo Bonzini int __attribute__((weak)) kvm_vm_ioctl_enable_cap(struct kvm *kvm,
46553c9bd400SJay Zhou 						  struct kvm_enable_cap *cap)
46563c9bd400SJay Zhou {
46573c9bd400SJay Zhou 	return -EINVAL;
46583c9bd400SJay Zhou }
46593c9bd400SJay Zhou 
46603c9bd400SJay Zhou bool kvm_are_all_memslots_empty(struct kvm *kvm)
46612a31b9dbSPaolo Bonzini {
46622a31b9dbSPaolo Bonzini 	int i;
46632a31b9dbSPaolo Bonzini 
46643c9bd400SJay Zhou 	lockdep_assert_held(&kvm->slots_lock);
46652a31b9dbSPaolo Bonzini 
4666acd05785SDavid Matlack 	for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) {
4667acd05785SDavid Matlack 		if (!kvm_memslots_empty(__kvm_memslots(kvm, i)))
4668acd05785SDavid Matlack 			return false;
4669acd05785SDavid Matlack 	}
4670acd05785SDavid Matlack 
46719eb8ca04SDavid Matlack 	return true;
46729eb8ca04SDavid Matlack }
46739eb8ca04SDavid Matlack EXPORT_SYMBOL_GPL(kvm_are_all_memslots_empty);
46749eb8ca04SDavid Matlack 
46759eb8ca04SDavid Matlack static int kvm_vm_ioctl_enable_cap_generic(struct kvm *kvm,
46769eb8ca04SDavid Matlack 					   struct kvm_enable_cap *cap)
46779eb8ca04SDavid Matlack {
46789eb8ca04SDavid Matlack 	switch (cap->cap) {
46799eb8ca04SDavid Matlack #ifdef CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT
46809eb8ca04SDavid Matlack 	case KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2: {
4681acd05785SDavid Matlack 		u64 allowed_options = KVM_DIRTY_LOG_MANUAL_PROTECT_ENABLE;
4682acd05785SDavid Matlack 
4683fb04a1edSPeter Xu 		if (cap->args[0] & KVM_DIRTY_LOG_MANUAL_PROTECT_ENABLE)
468417601bfeSMarc Zyngier 			allowed_options = KVM_DIRTY_LOG_MANUAL_CAPS;
46857a2726ecSGavin Shan 
46867a2726ecSGavin Shan 		if (cap->flags || (cap->args[0] & ~allowed_options))
46877a2726ecSGavin Shan 			return -EINVAL;
4688fb04a1edSPeter Xu 		kvm->manual_dirty_log_protect = cap->args[0];
468986bdf3ebSGavin Shan 		return 0;
469086bdf3ebSGavin Shan 	}
469186bdf3ebSGavin Shan #endif
469286bdf3ebSGavin Shan 	case KVM_CAP_HALT_POLL: {
469386bdf3ebSGavin Shan 		if (cap->flags || cap->args[0] != (unsigned int)cap->args[0])
469486bdf3ebSGavin Shan 			return -EINVAL;
469586bdf3ebSGavin Shan 
469686bdf3ebSGavin Shan 		kvm->max_halt_poll_ns = cap->args[0];
469786bdf3ebSGavin Shan 
469886bdf3ebSGavin Shan 		/*
469986bdf3ebSGavin Shan 		 * Ensure kvm->override_halt_poll_ns does not become visible
470086bdf3ebSGavin Shan 		 * before kvm->max_halt_poll_ns.
470186bdf3ebSGavin Shan 		 *
470286bdf3ebSGavin Shan 		 * Pairs with the smp_rmb() in kvm_vcpu_max_halt_poll_ns().
470386bdf3ebSGavin Shan 		 */
470486bdf3ebSGavin Shan 		smp_wmb();
470586bdf3ebSGavin Shan 		kvm->override_halt_poll_ns = true;
470686bdf3ebSGavin Shan 
470786bdf3ebSGavin Shan 		return 0;
470886bdf3ebSGavin Shan 	}
470986bdf3ebSGavin Shan 	case KVM_CAP_DIRTY_LOG_RING:
471086bdf3ebSGavin Shan 	case KVM_CAP_DIRTY_LOG_RING_ACQ_REL:
471186bdf3ebSGavin Shan 		if (!kvm_vm_ioctl_check_extension_generic(kvm, cap->cap))
4712e5d83c74SPaolo Bonzini 			return -EINVAL;
4713e5d83c74SPaolo Bonzini 
4714e5d83c74SPaolo Bonzini 		return kvm_vm_ioctl_enable_dirty_log_ring(kvm, cap->args[0]);
4715e5d83c74SPaolo Bonzini 	case KVM_CAP_DIRTY_LOG_RING_WITH_BITMAP: {
4716e5d83c74SPaolo Bonzini 		int r = -EINVAL;
4717fcfe1baeSJing Zhang 
4718fcfe1baeSJing Zhang 		if (!IS_ENABLED(CONFIG_NEED_KVM_DIRTY_RING_WITH_BITMAP) ||
4719fcfe1baeSJing Zhang 		    !kvm->dirty_ring_size || cap->flags)
4720fcfe1baeSJing Zhang 			return r;
4721fcfe1baeSJing Zhang 
4722fcfe1baeSJing Zhang 		mutex_lock(&kvm->slots_lock);
4723fcfe1baeSJing Zhang 
4724fcfe1baeSJing Zhang 		/*
4725fcfe1baeSJing Zhang 		 * For simplicity, allow enabling ring+bitmap if and only if
4726fcfe1baeSJing Zhang 		 * there are no memslots, e.g. to ensure all memslots allocate
4727eed3013fSSean Christopherson 		 * a bitmap after the capability is enabled.
4728eed3013fSSean Christopherson 		 */
4729eed3013fSSean Christopherson 		if (kvm_are_all_memslots_empty(kvm)) {
4730eed3013fSSean Christopherson 			kvm->dirty_ring_with_bitmap = true;
4731eed3013fSSean Christopherson 			r = 0;
4732eed3013fSSean Christopherson 		}
4733eed3013fSSean Christopherson 
4734eed3013fSSean Christopherson 		mutex_unlock(&kvm->slots_lock);
4735fcfe1baeSJing Zhang 
4736fcfe1baeSJing Zhang 		return r;
4737eed3013fSSean Christopherson 	}
4738fcfe1baeSJing Zhang 	default:
4739fcfe1baeSJing Zhang 		return kvm_vm_ioctl_enable_cap(kvm, cap);
4740fcfe1baeSJing Zhang 	}
4741fcfe1baeSJing Zhang }
4742fcfe1baeSJing Zhang 
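/*
 * Illustrative userspace sketch (not part of this file): generic
 * capabilities such as KVM_CAP_HALT_POLL are enabled per VM through
 * KVM_ENABLE_CAP, with args[0] carrying the new maximum halt-polling time
 * in nanoseconds as handled above.  The value is an example only; error
 * handling is omitted.
 *
 *	struct kvm_enable_cap cap = {
 *		.cap	 = KVM_CAP_HALT_POLL,
 *		.args[0] = 200000,		// 200 us, example value
 *	};
 *	ioctl(vm_fd, KVM_ENABLE_CAP, &cap);
 */
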
4743fcfe1baeSJing Zhang static ssize_t kvm_vm_stats_read(struct file *file, char __user *user_buffer,
4744fcfe1baeSJing Zhang 			      size_t size, loff_t *offset)
4745fcfe1baeSJing Zhang {
4746fcfe1baeSJing Zhang 	struct kvm *kvm = file->private_data;
4747fcfe1baeSJing Zhang 
4748fcfe1baeSJing Zhang 	return kvm_stats_read(kvm->stats_id, &kvm_vm_stats_header,
4749fcfe1baeSJing Zhang 				&kvm_vm_stats_desc[0], &kvm->stat,
4750fcfe1baeSJing Zhang 				sizeof(kvm->stat), user_buffer, size, offset);
4751fcfe1baeSJing Zhang }
4752fcfe1baeSJing Zhang 
4753fcfe1baeSJing Zhang static int kvm_vm_stats_release(struct inode *inode, struct file *file)
4754fcfe1baeSJing Zhang {
4755fcfe1baeSJing Zhang 	struct kvm *kvm = file->private_data;
4756eed3013fSSean Christopherson 
4757eed3013fSSean Christopherson 	kvm_put_kvm(kvm);
4758eed3013fSSean Christopherson 	return 0;
4759fcfe1baeSJing Zhang }
4760fcfe1baeSJing Zhang 
4761fcfe1baeSJing Zhang static const struct file_operations kvm_vm_stats_fops = {
4762fcfe1baeSJing Zhang 	.read = kvm_vm_stats_read,
4763fcfe1baeSJing Zhang 	.release = kvm_vm_stats_release,
4764fcfe1baeSJing Zhang 	.llseek = noop_llseek,
47650fce5623SAvi Kivity };
47660fce5623SAvi Kivity 
47670fce5623SAvi Kivity static int kvm_vm_ioctl_get_stats_fd(struct kvm *kvm)
47680fce5623SAvi Kivity {
47690fce5623SAvi Kivity 	int fd;
47700fce5623SAvi Kivity 	struct file *file;
47710fce5623SAvi Kivity 
4772f4d31653SPaolo Bonzini 	fd = get_unused_fd_flags(O_CLOEXEC);
47730fce5623SAvi Kivity 	if (fd < 0)
47740fce5623SAvi Kivity 		return fd;
47750fce5623SAvi Kivity 
47760fce5623SAvi Kivity 	file = anon_inode_getfile("kvm-vm-stats",
47770fce5623SAvi Kivity 			&kvm_vm_stats_fops, kvm, O_RDONLY);
4778e5d83c74SPaolo Bonzini 	if (IS_ERR(file)) {
4779e5d83c74SPaolo Bonzini 		put_unused_fd(fd);
4780e5d83c74SPaolo Bonzini 		return PTR_ERR(file);
4781e5d83c74SPaolo Bonzini 	}
4782e5d83c74SPaolo Bonzini 
4783e5d83c74SPaolo Bonzini 	kvm_get_kvm(kvm);
4784e5d83c74SPaolo Bonzini 
4785e5d83c74SPaolo Bonzini 	file->f_mode |= FMODE_PREAD;
4786e5d83c74SPaolo Bonzini 	fd_install(fd, file);
47870fce5623SAvi Kivity 
47880fce5623SAvi Kivity 	return fd;
47890fce5623SAvi Kivity }
47900fce5623SAvi Kivity 
47910fce5623SAvi Kivity static long kvm_vm_ioctl(struct file *filp,
4792893bdbf1SXiubo Li 			   unsigned int ioctl, unsigned long arg)
47930fce5623SAvi Kivity {
47940fce5623SAvi Kivity 	struct kvm *kvm = filp->private_data;
479547ae31e2STakuya Yoshikawa 	void __user *argp = (void __user *)arg;
47960fce5623SAvi Kivity 	int r;
47970fce5623SAvi Kivity 
47980fce5623SAvi Kivity 	if (kvm->mm != current->mm || kvm->vm_dead)
47990fce5623SAvi Kivity 		return -EIO;
48000fce5623SAvi Kivity 	switch (ioctl) {
48010fce5623SAvi Kivity 	case KVM_CREATE_VCPU:
4802893bdbf1SXiubo Li 		r = kvm_vm_ioctl_create_vcpu(kvm, arg);
48030fce5623SAvi Kivity 		break;
48040fce5623SAvi Kivity 	case KVM_ENABLE_CAP: {
48050fce5623SAvi Kivity 		struct kvm_enable_cap cap;
48060fce5623SAvi Kivity 
48072a31b9dbSPaolo Bonzini 		r = -EFAULT;
48082a31b9dbSPaolo Bonzini 		if (copy_from_user(&cap, argp, sizeof(cap)))
48092a31b9dbSPaolo Bonzini 			goto out;
48102a31b9dbSPaolo Bonzini 		r = kvm_vm_ioctl_enable_cap_generic(kvm, &cap);
48112a31b9dbSPaolo Bonzini 		break;
48122a31b9dbSPaolo Bonzini 	}
48132a31b9dbSPaolo Bonzini 	case KVM_SET_USER_MEMORY_REGION: {
48142a31b9dbSPaolo Bonzini 		struct kvm_userspace_memory_region kvm_userspace_mem;
48152a31b9dbSPaolo Bonzini 
48162a31b9dbSPaolo Bonzini 		r = -EFAULT;
48172a31b9dbSPaolo Bonzini 		if (copy_from_user(&kvm_userspace_mem, argp,
48184b4357e0SPaolo Bonzini 						sizeof(kvm_userspace_mem)))
48195f94c174SLaurent Vivier 			goto out;
48205f94c174SLaurent Vivier 
4821f95ef0cdSXiubo Li 		r = kvm_vm_ioctl_set_memory_region(kvm, &kvm_userspace_mem);
48225f94c174SLaurent Vivier 		break;
4823893bdbf1SXiubo Li 	}
48245f94c174SLaurent Vivier 	case KVM_GET_DIRTY_LOG: {
48255f94c174SLaurent Vivier 		struct kvm_dirty_log log;
48265f94c174SLaurent Vivier 
48275f94c174SLaurent Vivier 		r = -EFAULT;
48285f94c174SLaurent Vivier 		if (copy_from_user(&log, argp, sizeof(log)))
48295f94c174SLaurent Vivier 			goto out;
4830f95ef0cdSXiubo Li 		r = kvm_vm_ioctl_get_dirty_log(kvm, &log);
48315f94c174SLaurent Vivier 		break;
4832893bdbf1SXiubo Li 	}
48335f94c174SLaurent Vivier #ifdef CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT
48345f94c174SLaurent Vivier 	case KVM_CLEAR_DIRTY_LOG: {
48355f94c174SLaurent Vivier 		struct kvm_clear_dirty_log log;
48365f94c174SLaurent Vivier 
48375f94c174SLaurent Vivier 		r = -EFAULT;
4838721eecbfSGregory Haskins 		if (copy_from_user(&log, argp, sizeof(log)))
4839721eecbfSGregory Haskins 			goto out;
4840721eecbfSGregory Haskins 		r = kvm_vm_ioctl_clear_dirty_log(kvm, &log);
4841721eecbfSGregory Haskins 		break;
4842893bdbf1SXiubo Li 	}
4843721eecbfSGregory Haskins #endif
4844d4db2935SAlex Williamson #ifdef CONFIG_KVM_MMIO
4845721eecbfSGregory Haskins 	case KVM_REGISTER_COALESCED_MMIO: {
4846721eecbfSGregory Haskins 		struct kvm_coalesced_mmio_zone zone;
4847d34e6b17SGregory Haskins 
4848d34e6b17SGregory Haskins 		r = -EFAULT;
4849d34e6b17SGregory Haskins 		if (copy_from_user(&zone, argp, sizeof(zone)))
4850d34e6b17SGregory Haskins 			goto out;
4851893bdbf1SXiubo Li 		r = kvm_vm_ioctl_register_coalesced_mmio(kvm, &zone);
4852d34e6b17SGregory Haskins 		break;
4853d34e6b17SGregory Haskins 	}
4854d34e6b17SGregory Haskins 	case KVM_UNREGISTER_COALESCED_MMIO: {
4855d34e6b17SGregory Haskins 		struct kvm_coalesced_mmio_zone zone;
485607975ad3SJan Kiszka 
485707975ad3SJan Kiszka 		r = -EFAULT;
485807975ad3SJan Kiszka 		if (copy_from_user(&zone, argp, sizeof(zone)))
485907975ad3SJan Kiszka 			goto out;
486007975ad3SJan Kiszka 		r = kvm_vm_ioctl_unregister_coalesced_mmio(kvm, &zone);
4861893bdbf1SXiubo Li 		break;
486207975ad3SJan Kiszka 	}
486307975ad3SJan Kiszka #endif
486407975ad3SJan Kiszka 	case KVM_IRQFD: {
486507975ad3SJan Kiszka 		struct kvm_irqfd data;
486607975ad3SJan Kiszka 
486723d43cf9SChristoffer Dall 		r = -EFAULT;
486823d43cf9SChristoffer Dall 		if (copy_from_user(&data, argp, sizeof(data)))
486923d43cf9SChristoffer Dall 			goto out;
487023d43cf9SChristoffer Dall 		r = kvm_irqfd(kvm, &data);
487123d43cf9SChristoffer Dall 		break;
487223d43cf9SChristoffer Dall 	}
4873893bdbf1SXiubo Li 	case KVM_IOEVENTFD: {
487423d43cf9SChristoffer Dall 		struct kvm_ioeventfd data;
487523d43cf9SChristoffer Dall 
4876aa2fbe6dSYang Zhang 		r = -EFAULT;
4877aa2fbe6dSYang Zhang 		if (copy_from_user(&data, argp, sizeof(data)))
487823d43cf9SChristoffer Dall 			goto out;
487923d43cf9SChristoffer Dall 		r = kvm_ioeventfd(kvm, &data);
488023d43cf9SChristoffer Dall 		break;
488123d43cf9SChristoffer Dall 	}
488223d43cf9SChristoffer Dall #ifdef CONFIG_HAVE_KVM_MSI
4883893bdbf1SXiubo Li 	case KVM_SIGNAL_MSI: {
488423d43cf9SChristoffer Dall 		struct kvm_msi msi;
488523d43cf9SChristoffer Dall 
488623d43cf9SChristoffer Dall 		r = -EFAULT;
488723d43cf9SChristoffer Dall 		if (copy_from_user(&msi, argp, sizeof(msi)))
488823d43cf9SChristoffer Dall 			goto out;
488923d43cf9SChristoffer Dall 		r = kvm_send_userspace_msi(kvm, &msi);
489023d43cf9SChristoffer Dall 		break;
4891aa8d5944SAlexander Graf 	}
4892aa8d5944SAlexander Graf #endif
4893aa8d5944SAlexander Graf #ifdef __KVM_HAVE_IRQ_LINE
4894aa8d5944SAlexander Graf 	case KVM_IRQ_LINE_STATUS:
4895f8c1b85bSPaolo Bonzini 	case KVM_IRQ_LINE: {
4896aa8d5944SAlexander Graf 		struct kvm_irq_level irq_event;
4897aa8d5944SAlexander Graf 
4898aa8d5944SAlexander Graf 		r = -EFAULT;
4899aa8d5944SAlexander Graf 		if (copy_from_user(&irq_event, argp, sizeof(irq_event)))
4900aa8d5944SAlexander Graf 			goto out;
49015c0aea0eSDavid Hildenbrand 
49025c0aea0eSDavid Hildenbrand 		r = kvm_vm_ioctl_irq_line(kvm, &irq_event,
4903caf1ff26SXiubo Li 					ioctl == KVM_IRQ_LINE_STATUS);
4904aa8d5944SAlexander Graf 		if (r)
4905aa8d5944SAlexander Graf 			goto out;
4906aa8d5944SAlexander Graf 
4907f8c1b85bSPaolo Bonzini 		r = -EFAULT;
4908aa8d5944SAlexander Graf 		if (ioctl == KVM_IRQ_LINE_STATUS) {
49097ec28e26SDenis Efremov 			if (copy_to_user(argp, &irq_event, sizeof(irq_event)))
49107ec28e26SDenis Efremov 				goto out;
49117ec28e26SDenis Efremov 		}
49127ec28e26SDenis Efremov 
49137ec28e26SDenis Efremov 		r = 0;
49147ec28e26SDenis Efremov 		break;
49157ec28e26SDenis Efremov 	}
4916f8c1b85bSPaolo Bonzini #endif
4917aa8d5944SAlexander Graf #ifdef CONFIG_HAVE_KVM_IRQ_ROUTING
4918aa8d5944SAlexander Graf 	case KVM_SET_GSI_ROUTING: {
49197ec28e26SDenis Efremov 		struct kvm_irq_routing routing;
4920aa8d5944SAlexander Graf 		struct kvm_irq_routing __user *urouting;
4921aa8d5944SAlexander Graf 		struct kvm_irq_routing_entry *entries = NULL;
4922aa8d5944SAlexander Graf 
4923852b6d57SScott Wood 		r = -EFAULT;
4924852b6d57SScott Wood 		if (copy_from_user(&routing, argp, sizeof(routing)))
4925852b6d57SScott Wood 			goto out;
4926852b6d57SScott Wood 		r = -EINVAL;
4927852b6d57SScott Wood 		if (!kvm_arch_can_set_irq_routing(kvm))
4928852b6d57SScott Wood 			goto out;
4929852b6d57SScott Wood 		if (routing.nr > KVM_MAX_IRQ_ROUTES)
4930852b6d57SScott Wood 			goto out;
4931852b6d57SScott Wood 		if (routing.flags)
4932852b6d57SScott Wood 			goto out;
4933852b6d57SScott Wood 		if (routing.nr) {
4934852b6d57SScott Wood 			urouting = argp;
4935852b6d57SScott Wood 			entries = vmemdup_user(urouting->entries,
4936852b6d57SScott Wood 					       array_size(sizeof(*entries),
4937852b6d57SScott Wood 							  routing.nr));
4938852b6d57SScott Wood 			if (IS_ERR(entries)) {
4939852b6d57SScott Wood 				r = PTR_ERR(entries);
4940852b6d57SScott Wood 				goto out;
494192b591a4SAlexander Graf 			}
494292b591a4SAlexander Graf 		}
494392b591a4SAlexander Graf 		r = kvm_set_irq_routing(kvm, entries, routing.nr,
4944fb04a1edSPeter Xu 					routing.flags);
4945fb04a1edSPeter Xu 		kvfree(entries);
4946fb04a1edSPeter Xu 		break;
4947fcfe1baeSJing Zhang 	}
4948fcfe1baeSJing Zhang #endif /* CONFIG_HAVE_KVM_IRQ_ROUTING */
4949fcfe1baeSJing Zhang 	case KVM_CREATE_DEVICE: {
49500fce5623SAvi Kivity 		struct kvm_create_device cd;
49510fce5623SAvi Kivity 
49520fce5623SAvi Kivity 		r = -EFAULT;
49530fce5623SAvi Kivity 		if (copy_from_user(&cd, argp, sizeof(cd)))
49540fce5623SAvi Kivity 			goto out;
49550fce5623SAvi Kivity 
49560fce5623SAvi Kivity 		r = kvm_ioctl_create_device(kvm, &cd);
4957de8e5d74SChristian Borntraeger 		if (r)
49586ff5894cSArnd Bergmann 			goto out;
49596ff5894cSArnd Bergmann 
49606ff5894cSArnd Bergmann 		r = -EFAULT;
49616ff5894cSArnd Bergmann 		if (copy_to_user(argp, &cd, sizeof(cd)))
49626ff5894cSArnd Bergmann 			goto out;
49636ff5894cSArnd Bergmann 
49646ff5894cSArnd Bergmann 		r = 0;
49656ff5894cSArnd Bergmann 		break;
49666ff5894cSArnd Bergmann 	}
49678750f9bbSPaolo Bonzini 	case KVM_CHECK_EXTENSION:
49688750f9bbSPaolo Bonzini 		r = kvm_vm_ioctl_check_extension_generic(kvm, arg);
49698750f9bbSPaolo Bonzini 		break;
49708750f9bbSPaolo Bonzini 	case KVM_RESET_DIRTY_RINGS:
49718750f9bbSPaolo Bonzini 		r = kvm_vm_ioctl_reset_dirty_pages(kvm);
49728750f9bbSPaolo Bonzini 		break;
49738750f9bbSPaolo Bonzini 	case KVM_GET_STATS_FD:
49748750f9bbSPaolo Bonzini 		r = kvm_vm_ioctl_get_stats_fd(kvm);
49758750f9bbSPaolo Bonzini 		break;
49768750f9bbSPaolo Bonzini 	default:
4977ed51862fSAlexander Graf 		r = kvm_arch_vm_ioctl(filp, ioctl, arg);
4978ed51862fSAlexander Graf 	}
4979ed51862fSAlexander Graf out:
4980ed51862fSAlexander Graf 	return r;
4981ed51862fSAlexander Graf }
4982ed51862fSAlexander Graf 
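/*
 * Illustrative userspace sketch (not part of this file): guest memory is
 * registered through KVM_SET_USER_MEMORY_REGION, dispatched above to
 * kvm_vm_ioctl_set_memory_region().  The sizes and addresses below are
 * placeholders; error handling is omitted.
 *
 *	void *backing = mmap(NULL, 1 << 30, PROT_READ | PROT_WRITE,
 *			     MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *	struct kvm_userspace_memory_region region = {
 *		.slot		 = 0,
 *		.flags		 = 0,
 *		.guest_phys_addr = 0,
 *		.memory_size	 = 1 << 30,
 *		.userspace_addr	 = (__u64)(unsigned long)backing,
 *	};
 *	ioctl(vm_fd, KVM_SET_USER_MEMORY_REGION, &region);
 */
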
49836ff5894cSArnd Bergmann #ifdef CONFIG_KVM_COMPAT
49846ff5894cSArnd Bergmann struct compat_kvm_dirty_log {
49856ff5894cSArnd Bergmann 	__u32 slot;
49866ff5894cSArnd Bergmann 	__u32 padding1;
49876ff5894cSArnd Bergmann 	union {
49886ff5894cSArnd Bergmann 		compat_uptr_t dirty_bitmap; /* one bit per page */
4989f4d31653SPaolo Bonzini 		__u64 padding2;
49906ff5894cSArnd Bergmann 	};
4991ed51862fSAlexander Graf };
4992ed51862fSAlexander Graf 
4993ed51862fSAlexander Graf struct compat_kvm_clear_dirty_log {
4994ed51862fSAlexander Graf 	__u32 slot;
4995ed51862fSAlexander Graf 	__u32 num_pages;
49966ff5894cSArnd Bergmann 	__u64 first_page;
49978750f9bbSPaolo Bonzini 	union {
49988750f9bbSPaolo Bonzini 		compat_uptr_t dirty_bitmap; /* one bit per page */
49998750f9bbSPaolo Bonzini 		__u64 padding2;
50008750f9bbSPaolo Bonzini 	};
50018750f9bbSPaolo Bonzini };
50028750f9bbSPaolo Bonzini 
50038750f9bbSPaolo Bonzini long __weak kvm_arch_vm_compat_ioctl(struct file *filp, unsigned int ioctl,
50048750f9bbSPaolo Bonzini 				     unsigned long arg)
50058750f9bbSPaolo Bonzini {
50068750f9bbSPaolo Bonzini 	return -ENOTTY;
50078750f9bbSPaolo Bonzini }
50088750f9bbSPaolo Bonzini 
50098750f9bbSPaolo Bonzini static long kvm_vm_compat_ioctl(struct file *filp,
50108750f9bbSPaolo Bonzini 			   unsigned int ioctl, unsigned long arg)
50118750f9bbSPaolo Bonzini {
50128750f9bbSPaolo Bonzini 	struct kvm *kvm = filp->private_data;
50138750f9bbSPaolo Bonzini 	int r;
50148750f9bbSPaolo Bonzini 
50156ff5894cSArnd Bergmann 	if (kvm->mm != current->mm || kvm->vm_dead)
50166ff5894cSArnd Bergmann 		return -EIO;
50176ff5894cSArnd Bergmann 
50186ff5894cSArnd Bergmann 	r = kvm_arch_vm_compat_ioctl(filp, ioctl, arg);
50196ff5894cSArnd Bergmann 	if (r != -ENOTTY)
50206ff5894cSArnd Bergmann 		return r;
5021f6a3b168SMarkus Elfring 
50226ff5894cSArnd Bergmann 	switch (ioctl) {
50236ff5894cSArnd Bergmann #ifdef CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT
50246ff5894cSArnd Bergmann 	case KVM_CLEAR_DIRTY_LOG: {
50256ff5894cSArnd Bergmann 		struct compat_kvm_clear_dirty_log compat_log;
50266ff5894cSArnd Bergmann 		struct kvm_clear_dirty_log log;
50276ff5894cSArnd Bergmann 
50286ff5894cSArnd Bergmann 		if (copy_from_user(&compat_log, (void __user *)arg,
50296ff5894cSArnd Bergmann 				   sizeof(compat_log)))
50306ff5894cSArnd Bergmann 			return -EFAULT;
50316ff5894cSArnd Bergmann 		log.slot	 = compat_log.slot;
50326ff5894cSArnd Bergmann 		log.num_pages	 = compat_log.num_pages;
50336ff5894cSArnd Bergmann 		log.first_page	 = compat_log.first_page;
50346ff5894cSArnd Bergmann 		log.padding2	 = compat_log.padding2;
50356ff5894cSArnd Bergmann 		log.dirty_bitmap = compat_ptr(compat_log.dirty_bitmap);
50366ff5894cSArnd Bergmann 
503770375c2dSDavid Matlack 		r = kvm_vm_ioctl_clear_dirty_log(kvm, &log);
50380fce5623SAvi Kivity 		break;
50390fce5623SAvi Kivity 	}
50406038f373SArnd Bergmann #endif
50417ddfd3e0SMarc Zyngier 	case KVM_GET_DIRTY_LOG: {
50420fce5623SAvi Kivity 		struct compat_kvm_dirty_log compat_log;
50430fce5623SAvi Kivity 		struct kvm_dirty_log log;
504454526d1fSNathan Tempelman 
504554526d1fSNathan Tempelman 		if (copy_from_user(&compat_log, (void __user *)arg,
504654526d1fSNathan Tempelman 				   sizeof(compat_log)))
504754526d1fSNathan Tempelman 			return -EFAULT;
504854526d1fSNathan Tempelman 		log.slot	 = compat_log.slot;
504954526d1fSNathan Tempelman 		log.padding1	 = compat_log.padding1;
5050e08b9637SCarsten Otte 		log.padding2	 = compat_log.padding2;
50510fce5623SAvi Kivity 		log.dirty_bitmap = compat_ptr(compat_log.dirty_bitmap);
505259f82aadSOliver Upton 
505320020f4cSOliver Upton 		r = kvm_vm_ioctl_get_dirty_log(kvm, &log);
50540fce5623SAvi Kivity 		break;
5055506cfba9SAl Viro 	}
50560fce5623SAvi Kivity 	default:
505720020f4cSOliver Upton 		r = kvm_vm_ioctl(filp, ioctl, arg);
505820020f4cSOliver Upton 	}
505920020f4cSOliver Upton 	return r;
506020020f4cSOliver Upton }
506159f82aadSOliver Upton #endif
506259f82aadSOliver Upton 
5063b74ed7a6SOliver Upton static const struct file_operations kvm_vm_fops = {
506420020f4cSOliver Upton 	.release        = kvm_vm_release,
506520020f4cSOliver Upton 	.unlocked_ioctl = kvm_vm_ioctl,
506620020f4cSOliver Upton 	.llseek		= noop_llseek,
506720020f4cSOliver Upton 	KVM_COMPAT(kvm_vm_compat_ioctl),
506820020f4cSOliver Upton };
5069506cfba9SAl Viro 
5070506cfba9SAl Viro bool file_is_kvm(struct file *file)
507178588335SMarkus Elfring {
507278588335SMarkus Elfring 	return file && file->f_op == &kvm_vm_fops;
5073506cfba9SAl Viro }
5074536a6f88SJanosch Frank EXPORT_SYMBOL_GPL(file_is_kvm);
5075525df861SPaolo Bonzini 
5076525df861SPaolo Bonzini static int kvm_dev_ioctl_create_vm(unsigned long type)
5077525df861SPaolo Bonzini {
5078525df861SPaolo Bonzini 	char fdname[ITOA_MAX_LEN + 1];
5079525df861SPaolo Bonzini 	int r, fd;
5080525df861SPaolo Bonzini 	struct kvm *kvm;
5081286de8f6SClaudio Imbrenda 	struct file *file;
50820fce5623SAvi Kivity 
508320020f4cSOliver Upton 	fd = get_unused_fd_flags(O_CLOEXEC);
508420020f4cSOliver Upton 	if (fd < 0)
508578588335SMarkus Elfring 		return fd;
508678588335SMarkus Elfring 
508778588335SMarkus Elfring 	snprintf(fdname, sizeof(fdname), "%d", fd);
508820020f4cSOliver Upton 
508920020f4cSOliver Upton 	kvm = kvm_create_vm(type, fdname);
509078588335SMarkus Elfring 	if (IS_ERR(kvm)) {
50910fce5623SAvi Kivity 		r = PTR_ERR(kvm);
50920fce5623SAvi Kivity 		goto put_fd;
50930fce5623SAvi Kivity 	}
50940fce5623SAvi Kivity 
50950fce5623SAvi Kivity 	file = anon_inode_getfile("kvm-vm", &kvm_vm_fops, kvm, O_RDWR);
5096f15ba52bSThomas Huth 	if (IS_ERR(file)) {
50970fce5623SAvi Kivity 		r = PTR_ERR(file);
50980fce5623SAvi Kivity 		goto put_kvm;
50990fce5623SAvi Kivity 	}
51000fce5623SAvi Kivity 
51010fce5623SAvi Kivity 	/*
51020fce5623SAvi Kivity 	 * Don't call kvm_put_kvm anymore at this point; file->f_op is
51030fce5623SAvi Kivity 	 * already set, with ->release() being kvm_vm_release().  In error
51040fce5623SAvi Kivity 	 * cases it will be called by the final fput(file) and will take
5105e08b9637SCarsten Otte 	 * care of doing kvm_put_kvm(kvm).
51060fce5623SAvi Kivity 	 */
51070fce5623SAvi Kivity 	kvm_uevent_notify_change(KVM_EVENT_CREATE_VM, kvm);
5108784aa3d7SAlexander Graf 
51090fce5623SAvi Kivity 	fd_install(fd, file);
51100fce5623SAvi Kivity 	return fd;
51110fce5623SAvi Kivity 
51120fce5623SAvi Kivity put_kvm:
5113adb1ff46SAvi Kivity 	kvm_put_kvm(kvm);
5114adb1ff46SAvi Kivity put_fd:
5115adb1ff46SAvi Kivity 	put_unused_fd(fd);
5116adb1ff46SAvi Kivity 	return r;
51174b4357e0SPaolo Bonzini }
51185f94c174SLaurent Vivier 
51195f94c174SLaurent Vivier static long kvm_dev_ioctl(struct file *filp,
51200fce5623SAvi Kivity 			  unsigned int ioctl, unsigned long arg)
5121d4c9ff2dSFeng(Eric) Liu {
5122d4c9ff2dSFeng(Eric) Liu 	int r = -EINVAL;
5123d4c9ff2dSFeng(Eric) Liu 
51242023a29cSMarcelo Tosatti 	switch (ioctl) {
5125d4c9ff2dSFeng(Eric) Liu 	case KVM_GET_API_VERSION:
51260fce5623SAvi Kivity 		if (arg)
51270fce5623SAvi Kivity 			goto out;
51280fce5623SAvi Kivity 		r = KVM_API_VERSION;
51290fce5623SAvi Kivity 		break;
51300fce5623SAvi Kivity 	case KVM_CREATE_VM:
51310fce5623SAvi Kivity 		r = kvm_dev_ioctl_create_vm(arg);
51320fce5623SAvi Kivity 		break;
51330fce5623SAvi Kivity 	case KVM_CHECK_EXTENSION:
51340fce5623SAvi Kivity 		r = kvm_vm_ioctl_check_extension_generic(NULL, arg);
51356038f373SArnd Bergmann 		break;
51367ddfd3e0SMarc Zyngier 	case KVM_GET_VCPU_MMAP_SIZE:
51370fce5623SAvi Kivity 		if (arg)
51380fce5623SAvi Kivity 			goto out;
51390fce5623SAvi Kivity 		r = PAGE_SIZE;     /* struct kvm_run */
51400fce5623SAvi Kivity #ifdef CONFIG_X86
51410fce5623SAvi Kivity 		r += PAGE_SIZE;    /* pio data page */
51420fce5623SAvi Kivity #endif
51430fce5623SAvi Kivity #ifdef CONFIG_KVM_MMIO
51440fce5623SAvi Kivity 		r += PAGE_SIZE;    /* coalesced mmio ring page */
5145441f7bfaSSean Christopherson #endif
5146441f7bfaSSean Christopherson 		break;
5147441f7bfaSSean Christopherson 	case KVM_TRACE_ENABLE:
5148441f7bfaSSean Christopherson 	case KVM_TRACE_PAUSE:
5149441f7bfaSSean Christopherson 	case KVM_TRACE_DISABLE:
5150441f7bfaSSean Christopherson 		r = -EOPNOTSUPP;
5151441f7bfaSSean Christopherson 		break;
5152e6fb7d6eSIsaku Yamahata 	default:
51530fce5623SAvi Kivity 		return kvm_arch_dev_ioctl(filp, ioctl, arg);
515437d25881SSean Christopherson 	}
5155e6fb7d6eSIsaku Yamahata out:
515610474ae8SAlexander Graf 	return r;
515737d25881SSean Christopherson }
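/*
 * Illustrative userspace sketch (not part of this file): exercising the
 * /dev/kvm ioctls handled above.  Error handling is minimal and the
 * machine type 0 is only an example; real VMMs query extensions first.
 */
#include <fcntl.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

static int example_create_vm(void)
{
	int kvm_fd, vm_fd;

	kvm_fd = open("/dev/kvm", O_RDWR | O_CLOEXEC);
	if (kvm_fd < 0)
		return -1;

	/* KVM_GET_API_VERSION must return KVM_API_VERSION (12). */
	if (ioctl(kvm_fd, KVM_GET_API_VERSION, 0) != KVM_API_VERSION)
		return -1;

	/* Type 0 selects the default machine type on most architectures. */
	vm_fd = ioctl(kvm_fd, KVM_CREATE_VM, 0);
	return vm_fd;	/* anonymous-inode fd backed by kvm_vm_fops */
}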
515837d25881SSean Christopherson 
515937d25881SSean Christopherson static struct file_operations kvm_chardev_ops = {
5160e6fb7d6eSIsaku Yamahata 	.unlocked_ioctl = kvm_dev_ioctl,
516110474ae8SAlexander Graf 	.llseek		= noop_llseek,
516237d25881SSean Christopherson 	KVM_COMPAT(kvm_dev_ioctl),
516337d25881SSean Christopherson };
5164e6fb7d6eSIsaku Yamahata 
5165e6fb7d6eSIsaku Yamahata static struct miscdevice kvm_dev = {
5166e6fb7d6eSIsaku Yamahata 	KVM_MINOR,
5167e6fb7d6eSIsaku Yamahata 	"kvm",
5168e6fb7d6eSIsaku Yamahata 	&kvm_chardev_ops,
5169e6fb7d6eSIsaku Yamahata };
5170e6fb7d6eSIsaku Yamahata 
51710fce5623SAvi Kivity #ifdef CONFIG_KVM_GENERIC_HARDWARE_ENABLING
51720fce5623SAvi Kivity __visible bool kvm_rebooting;
5173aaf12a7bSChao Gao EXPORT_SYMBOL_GPL(kvm_rebooting);
517475b7127cSTakuya Yoshikawa 
5175aaf12a7bSChao Gao static DEFINE_PER_CPU(bool, hardware_enabled);
5176aaf12a7bSChao Gao static int kvm_usage_count;
5177aaf12a7bSChao Gao 
5178aaf12a7bSChao Gao static int __hardware_enable_nolock(void)
5179aaf12a7bSChao Gao {
5180aaf12a7bSChao Gao 	if (__this_cpu_read(hardware_enabled))
5181aaf12a7bSChao Gao 		return 0;
51820bf50497SIsaku Yamahata 
5183e6fb7d6eSIsaku Yamahata 	if (kvm_arch_hardware_enable()) {
5184e6fb7d6eSIsaku Yamahata 		pr_info("kvm: enabling virtualization on CPU%d failed\n",
51850bf50497SIsaku Yamahata 			raw_smp_processor_id());
5186aaf12a7bSChao Gao 		return -EIO;
518775b7127cSTakuya Yoshikawa 	}
518875b7127cSTakuya Yoshikawa 
518975b7127cSTakuya Yoshikawa 	__this_cpu_write(hardware_enabled, true);
51900fce5623SAvi Kivity 	return 0;
519137d25881SSean Christopherson }
519237d25881SSean Christopherson 
519337d25881SSean Christopherson static void hardware_enable_nolock(void *failed)
519437d25881SSean Christopherson {
519537d25881SSean Christopherson 	if (__hardware_enable_nolock())
51960fce5623SAvi Kivity 		atomic_inc(failed);
519737d25881SSean Christopherson }
519813a34e06SRadim Krčmář 
519937d25881SSean Christopherson static int kvm_online_cpu(unsigned int cpu)
520037d25881SSean Christopherson {
52010fce5623SAvi Kivity 	int ret = 0;
52020fce5623SAvi Kivity 
5203aaf12a7bSChao Gao 	/*
520475b7127cSTakuya Yoshikawa 	 * Abort the CPU online process if hardware virtualization cannot
52050bf50497SIsaku Yamahata 	 * be enabled. Otherwise running VMs would encounter unrecoverable
52064fa92fb2SPaolo Bonzini 	 * errors when scheduled to this CPU.
52074fa92fb2SPaolo Bonzini 	 */
52080bf50497SIsaku Yamahata 	mutex_lock(&kvm_lock);
52098c18b2d2SThomas Gleixner 	if (kvm_usage_count)
521075b7127cSTakuya Yoshikawa 		ret = __hardware_enable_nolock();
521175b7127cSTakuya Yoshikawa 	mutex_unlock(&kvm_lock);
521210474ae8SAlexander Graf 	return ret;
521310474ae8SAlexander Graf }
521410474ae8SAlexander Graf 
521510474ae8SAlexander Graf static void hardware_disable_nolock(void *junk)
521610474ae8SAlexander Graf {
521710474ae8SAlexander Graf 	/*
521875b7127cSTakuya Yoshikawa 	 * Note, hardware_disable_all_nolock() tells all online CPUs to disable
521910474ae8SAlexander Graf 	 * hardware, not just CPUs that successfully enabled hardware!
522010474ae8SAlexander Graf 	 */
522110474ae8SAlexander Graf 	if (!__this_cpu_read(hardware_enabled))
522210474ae8SAlexander Graf 		return;
5223e4aa7f88SChao Gao 
52240bf50497SIsaku Yamahata 	kvm_arch_hardware_disable();
522510474ae8SAlexander Graf 
52260bf50497SIsaku Yamahata 	__this_cpu_write(hardware_enabled, false);
5227e4aa7f88SChao Gao }
522810474ae8SAlexander Graf 
522910474ae8SAlexander Graf static int kvm_offline_cpu(unsigned int cpu)
523010474ae8SAlexander Graf {
523110474ae8SAlexander Graf 	mutex_lock(&kvm_lock);
5232e6fb7d6eSIsaku Yamahata 	if (kvm_usage_count)
5233e0ceec22SSean Christopherson 		hardware_disable_nolock(NULL);
5234e0ceec22SSean Christopherson 	mutex_unlock(&kvm_lock);
5235e0ceec22SSean Christopherson 	return 0;
5236e0ceec22SSean Christopherson }
5237e0ceec22SSean Christopherson 
5238e0ceec22SSean Christopherson static void hardware_disable_all_nolock(void)
5239e0ceec22SSean Christopherson {
5240e0ceec22SSean Christopherson 	BUG_ON(!kvm_usage_count);
5241e0ceec22SSean Christopherson 
5242e0ceec22SSean Christopherson 	kvm_usage_count--;
5243e0ceec22SSean Christopherson 	if (!kvm_usage_count)
5244e0ceec22SSean Christopherson 		on_each_cpu(hardware_disable_nolock, NULL, 1);
5245e0ceec22SSean Christopherson }
5246e0ceec22SSean Christopherson 
524710474ae8SAlexander Graf static void hardware_disable_all(void)
5248e4aa7f88SChao Gao {
5249e4aa7f88SChao Gao 	cpus_read_lock();
5250e4aa7f88SChao Gao 	mutex_lock(&kvm_lock);
5251e4aa7f88SChao Gao 	hardware_disable_all_nolock();
5252e4aa7f88SChao Gao 	mutex_unlock(&kvm_lock);
5253e4aa7f88SChao Gao 	cpus_read_unlock();
5254e4aa7f88SChao Gao }
5255e4aa7f88SChao Gao 
5256e4aa7f88SChao Gao static int hardware_enable_all(void)
52570bf50497SIsaku Yamahata {
525810474ae8SAlexander Graf 	atomic_t failed = ATOMIC_INIT(0);
5259e0ceec22SSean Christopherson 	int r;
5260e0ceec22SSean Christopherson 
526110474ae8SAlexander Graf 	/*
526210474ae8SAlexander Graf 	 * Do not enable hardware virtualization if the system is going down.
5263e6fb7d6eSIsaku Yamahata 	 * If userspace initiated a forced reboot, e.g. reboot -f, then it's
526410474ae8SAlexander Graf 	 * possible for an in-flight KVM_CREATE_VM to trigger hardware enabling
5265e6fb7d6eSIsaku Yamahata 	 * after kvm_reboot() is called.  Note, this relies on system_state
526610474ae8SAlexander Graf 	 * being set _before_ kvm_reboot(), which is why KVM uses a syscore ops
526710474ae8SAlexander Graf 	 * hook instead of registering a dedicated reboot notifier (the latter
526810474ae8SAlexander Graf 	 * runs before system_state is updated).
526910474ae8SAlexander Graf 	 */
527010474ae8SAlexander Graf 	if (system_state == SYSTEM_HALT || system_state == SYSTEM_POWER_OFF ||
52710bf50497SIsaku Yamahata 	    system_state == SYSTEM_RESTART)
5272e4aa7f88SChao Gao 		return -EBUSY;
527310474ae8SAlexander Graf 
527410474ae8SAlexander Graf 	/*
527510474ae8SAlexander Graf 	 * When onlining a CPU, cpu_online_mask is set before kvm_online_cpu()
527610474ae8SAlexander Graf 	 * is called, and so on_each_cpu() between them includes the CPU that
52776735150bSSean Christopherson 	 * is being onlined.  As a result, hardware_enable_nolock() may get
52780fce5623SAvi Kivity 	 * invoked before kvm_online_cpu(), which also enables hardware if the
52790fce5623SAvi Kivity 	 * usage count is non-zero.  Disable CPU hotplug to avoid attempting to
52806735150bSSean Christopherson 	 * enable hardware multiple times.
52816735150bSSean Christopherson 	 */
52826735150bSSean Christopherson 	cpus_read_lock();
52836735150bSSean Christopherson 	mutex_lock(&kvm_lock);
52846735150bSSean Christopherson 
52856735150bSSean Christopherson 	r = 0;
52866735150bSSean Christopherson 
52876735150bSSean Christopherson 	kvm_usage_count++;
52886735150bSSean Christopherson 	if (kvm_usage_count == 1) {
52890fce5623SAvi Kivity 		on_each_cpu(hardware_enable_nolock, &failed, 1);
52901170adc6SXiubo Li 
52914ecac3fdSAvi Kivity 		if (atomic_read(&failed)) {
529275b7127cSTakuya Yoshikawa 			hardware_disable_all_nolock();
52930fce5623SAvi Kivity 			r = -EBUSY;
52940fce5623SAvi Kivity 		}
529535774a9fSSean Christopherson 	}
529635774a9fSSean Christopherson 
529735774a9fSSean Christopherson 	mutex_unlock(&kvm_lock);
529835774a9fSSean Christopherson 	cpus_read_unlock();
529935774a9fSSean Christopherson 
530035774a9fSSean Christopherson 	return r;
530135774a9fSSean Christopherson }
530235774a9fSSean Christopherson 
530335774a9fSSean Christopherson static void kvm_shutdown(void)
530435774a9fSSean Christopherson {
530535774a9fSSean Christopherson 	/*
530635774a9fSSean Christopherson 	 * Disable hardware virtualization and set kvm_rebooting to indicate
530735774a9fSSean Christopherson 	 * that KVM has asynchronously disabled hardware virtualization, i.e.
530835774a9fSSean Christopherson 	 * that relevant errors and exceptions aren't entirely unexpected.
530935774a9fSSean Christopherson 	 * Some flavors of hardware virtualization need to be disabled before
531035774a9fSSean Christopherson 	 * transferring control to firmware (to perform shutdown/reboot), e.g.
531135774a9fSSean Christopherson 	 * on x86, virtualization can block INIT interrupts, which are used by
531235774a9fSSean Christopherson 	 * firmware to pull APs back under firmware control.  Note, this path
531335774a9fSSean Christopherson 	 * is used for both shutdown and reboot scenarios, i.e. neither name is
531435774a9fSSean Christopherson 	 * 100% comprehensive.
531535774a9fSSean Christopherson 	 */
531635774a9fSSean Christopherson 	pr_info("kvm: exiting hardware virtualization\n");
531735774a9fSSean Christopherson 	kvm_rebooting = true;
531835774a9fSSean Christopherson 	on_each_cpu(hardware_disable_nolock, NULL, 1);
531935774a9fSSean Christopherson }
532035774a9fSSean Christopherson 
532135774a9fSSean Christopherson static int kvm_suspend(void)
532235774a9fSSean Christopherson {
532335774a9fSSean Christopherson 	/*
532435774a9fSSean Christopherson 	 * Secondary CPUs and CPU hotplug are disabled across the suspend/resume
53256735150bSSean Christopherson 	 * callbacks, i.e. no need to acquire kvm_lock to ensure the usage count
532635774a9fSSean Christopherson 	 * is stable.  Assert that kvm_lock is not held to ensure the system
5327441f7bfaSSean Christopherson 	 * isn't suspended while KVM is enabling hardware.  Hardware enabling
5328441f7bfaSSean Christopherson 	 * can be preempted, but the task cannot be frozen until it has dropped
5329441f7bfaSSean Christopherson 	 * all locks (userspace tasks are frozen via a fake signal).
5330441f7bfaSSean Christopherson 	 */
5331441f7bfaSSean Christopherson 	lockdep_assert_not_held(&kvm_lock);
5332441f7bfaSSean Christopherson 	lockdep_assert_irqs_disabled();
5333441f7bfaSSean Christopherson 
5334441f7bfaSSean Christopherson 	if (kvm_usage_count)
5335441f7bfaSSean Christopherson 		hardware_disable_nolock(NULL);
5336441f7bfaSSean Christopherson 	return 0;
5337441f7bfaSSean Christopherson }
533835774a9fSSean Christopherson 
53395ea5ca3cSWei Wang static void kvm_resume(void)
53405ea5ca3cSWei Wang {
53415ea5ca3cSWei Wang 	lockdep_assert_not_held(&kvm_lock);
53425ea5ca3cSWei Wang 	lockdep_assert_irqs_disabled();
53435ea5ca3cSWei Wang 
53445ea5ca3cSWei Wang 	if (kvm_usage_count)
5345e93f8a0fSMarcelo Tosatti 		WARN_ON_ONCE(__hardware_enable_nolock());
53460fce5623SAvi Kivity }
53470fce5623SAvi Kivity 
53480fce5623SAvi Kivity static struct syscore_ops kvm_syscore_ops = {
53490fce5623SAvi Kivity 	.suspend = kvm_suspend,
5350743eeb0bSSasha Levin 	.resume = kvm_resume,
53510fce5623SAvi Kivity 	.shutdown = kvm_shutdown,
53520fce5623SAvi Kivity };
53530fce5623SAvi Kivity #else /* CONFIG_KVM_GENERIC_HARDWARE_ENABLING */
5354e93f8a0fSMarcelo Tosatti static int hardware_enable_all(void)
53550fce5623SAvi Kivity {
53560fce5623SAvi Kivity 	return 0;
5357c21fbff1SPaolo Bonzini }
5358a343c9b7SPaolo Bonzini 
5359743eeb0bSSasha Levin static void hardware_disable_all(void)
53608f4216c7SJason Wang {
53618f4216c7SJason Wang 
53628f4216c7SJason Wang }
53638f4216c7SJason Wang #endif /* CONFIG_KVM_GENERIC_HARDWARE_ENABLING */
5364743eeb0bSSasha Levin 
53658f4216c7SJason Wang static void kvm_iodevice_destructor(struct kvm_io_device *dev)
53668f4216c7SJason Wang {
53678f4216c7SJason Wang 	if (dev->ops->destructor)
53688f4216c7SJason Wang 		dev->ops->destructor(dev);
53698f4216c7SJason Wang }
53708f4216c7SJason Wang 
53718f4216c7SJason Wang static void kvm_io_bus_destroy(struct kvm_io_bus *bus)
53728f4216c7SJason Wang {
53738f4216c7SJason Wang 	int i;
53748f4216c7SJason Wang 
53758f4216c7SJason Wang 	for (i = 0; i < bus->dev_count; i++) {
53768f4216c7SJason Wang 		struct kvm_io_device *pos = bus->range[i].dev;
5377743eeb0bSSasha Levin 
53788f4216c7SJason Wang 		kvm_iodevice_destructor(pos);
5379743eeb0bSSasha Levin 	}
5380743eeb0bSSasha Levin 	kfree(bus);
5381743eeb0bSSasha Levin }
5382a343c9b7SPaolo Bonzini 
5383a343c9b7SPaolo Bonzini static inline int kvm_io_bus_cmp(const struct kvm_io_range *r1,
5384c21fbff1SPaolo Bonzini 				 const struct kvm_io_range *r2)
5385a343c9b7SPaolo Bonzini {
5386a343c9b7SPaolo Bonzini 	gpa_t addr1 = r1->addr;
538739369f7aSGeoff Levand 	gpa_t addr2 = r2->addr;
5388743eeb0bSSasha Levin 
5389743eeb0bSSasha Levin 	if (addr1 < addr2)
5390743eeb0bSSasha Levin 		return -1;
5391743eeb0bSSasha Levin 
5392743eeb0bSSasha Levin 	/* If r2->len == 0, match the exact address.  If r2->len != 0,
5393743eeb0bSSasha Levin 	 * accept any overlapping write.  Any order is acceptable for
5394743eeb0bSSasha Levin 	 * overlapping ranges, because kvm_io_bus_get_first_dev ensures
5395743eeb0bSSasha Levin 	 * we process all of them.
5396743eeb0bSSasha Levin 	 */
5397743eeb0bSSasha Levin 	if (r2->len) {
5398743eeb0bSSasha Levin 		addr1 += r1->len;
5399743eeb0bSSasha Levin 		addr2 += r2->len;
5400743eeb0bSSasha Levin 	}
5401743eeb0bSSasha Levin 
5402743eeb0bSSasha Levin 	if (addr1 > addr2)
5403743eeb0bSSasha Levin 		return 1;
5404743eeb0bSSasha Levin 
5405c21fbff1SPaolo Bonzini 	return 0;
5406743eeb0bSSasha Levin }
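/*
 * Worked example (illustrative, not in the original source): a search key
 * of {addr = 0x1004, len = 4} compares equal to a registered range of
 * {addr = 0x1000, len = 8}, since the key starts at or after the range's
 * start and ends at or before its end.  Against a registered range with
 * len == 0, the length adjustment is skipped, so only a key starting at
 * exactly that address compares equal.
 */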
5407743eeb0bSSasha Levin 
5408743eeb0bSSasha Levin static int kvm_io_bus_sort_cmp(const void *p1, const void *p2)
5409743eeb0bSSasha Levin {
5410743eeb0bSSasha Levin 	return kvm_io_bus_cmp(p1, p2);
5411e32edf4fSNikolay Nikolaev }
5412126a5af5SCornelia Huck 
5413126a5af5SCornelia Huck static int kvm_io_bus_get_first_dev(struct kvm_io_bus *bus,
5414126a5af5SCornelia Huck 			     gpa_t addr, int len)
5415126a5af5SCornelia Huck {
5416126a5af5SCornelia Huck 	struct kvm_io_range *range, key;
5417126a5af5SCornelia Huck 	int off;
5418126a5af5SCornelia Huck 
5419126a5af5SCornelia Huck 	key = (struct kvm_io_range) {
5420126a5af5SCornelia Huck 		.addr = addr,
5421c21fbff1SPaolo Bonzini 		.len = len,
5422e32edf4fSNikolay Nikolaev 	};
5423126a5af5SCornelia Huck 
5424126a5af5SCornelia Huck 	range = bsearch(&key, bus->range, bus->dev_count,
5425126a5af5SCornelia Huck 			sizeof(struct kvm_io_range), kvm_io_bus_sort_cmp);
5426126a5af5SCornelia Huck 	if (range == NULL)
5427126a5af5SCornelia Huck 		return -ENOENT;
5428126a5af5SCornelia Huck 
5429126a5af5SCornelia Huck 	off = range - bus->range;
5430126a5af5SCornelia Huck 
5431bda9020eSMichael S. Tsirkin 	while (off > 0 && kvm_io_bus_cmp(&key, &bus->range[off-1]) == 0)
5432e32edf4fSNikolay Nikolaev 		off--;
5433bda9020eSMichael S. Tsirkin 
54340fce5623SAvi Kivity 	return off;
5435126a5af5SCornelia Huck }
5436126a5af5SCornelia Huck 
5437126a5af5SCornelia Huck static int __kvm_io_bus_write(struct kvm_vcpu *vcpu, struct kvm_io_bus *bus,
5438126a5af5SCornelia Huck 			      struct kvm_io_range *range, const void *val)
5439126a5af5SCornelia Huck {
5440126a5af5SCornelia Huck 	int idx;
5441126a5af5SCornelia Huck 
5442126a5af5SCornelia Huck 	idx = kvm_io_bus_get_first_dev(bus, range->addr, range->len);
5443126a5af5SCornelia Huck 	if (idx < 0)
5444e32edf4fSNikolay Nikolaev 		return -EOPNOTSUPP;
544590db1043SDavid Hildenbrand 
544690db1043SDavid Hildenbrand 	while (idx < bus->dev_count &&
5447e32edf4fSNikolay Nikolaev 		kvm_io_bus_cmp(range, &bus->range[idx]) == 0) {
5448126a5af5SCornelia Huck 		if (!kvm_iodevice_write(vcpu, bus->range[idx].dev, range->addr,
5449126a5af5SCornelia Huck 					range->len, val))
5450a2420107SLeo Yan 			return idx;
5451126a5af5SCornelia Huck 		idx++;
5452126a5af5SCornelia Huck 	}
5453e32edf4fSNikolay Nikolaev 
5454e32edf4fSNikolay Nikolaev 	return -EOPNOTSUPP;
5455126a5af5SCornelia Huck }
545690d83dc3SLai Jiangshan 
5457743eeb0bSSasha Levin /* kvm_io_bus_write - called under kvm->slots_lock */
5458743eeb0bSSasha Levin int kvm_io_bus_write(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx, gpa_t addr,
5459743eeb0bSSasha Levin 		     int len, const void *val)
5460743eeb0bSSasha Levin {
5461743eeb0bSSasha Levin 	struct kvm_io_bus *bus;
5462743eeb0bSSasha Levin 	struct kvm_io_range range;
546390d83dc3SLai Jiangshan 	int r;
5464e32edf4fSNikolay Nikolaev 
546590db1043SDavid Hildenbrand 	range = (struct kvm_io_range) {
546690db1043SDavid Hildenbrand 		.addr = addr,
5467126a5af5SCornelia Huck 		.len = len,
5468126a5af5SCornelia Huck 	};
5469126a5af5SCornelia Huck 
5470c21fbff1SPaolo Bonzini 	bus = srcu_dereference(vcpu->kvm->buses[bus_idx], &vcpu->kvm->srcu);
5471e32edf4fSNikolay Nikolaev 	if (!bus)
5472126a5af5SCornelia Huck 		return -ENOMEM;
5473126a5af5SCornelia Huck 	r = __kvm_io_bus_write(vcpu, bus, &range, val);
5474126a5af5SCornelia Huck 	return r < 0 ? r : 0;
5475126a5af5SCornelia Huck }
5476126a5af5SCornelia Huck EXPORT_SYMBOL_GPL(kvm_io_bus_write);
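/*
 * Minimal sketch (not part of this file) of how an arch MMIO-exit path
 * could dispatch a guest write through the bus.  The helper name is
 * hypothetical; KVM_MMIO_BUS is the bus index used for MMIO devices.
 */
static int example_dispatch_mmio_write(struct kvm_vcpu *vcpu, gpa_t gpa,
				       int len, const void *data)
{
	/* Returns 0 if a registered device claimed the access. */
	return kvm_io_bus_write(vcpu, KVM_MMIO_BUS, gpa, len, data);
}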
5477126a5af5SCornelia Huck 
5478126a5af5SCornelia Huck /* kvm_io_bus_write_cookie - called under kvm->slots_lock */
5479e32edf4fSNikolay Nikolaev int kvm_io_bus_write_cookie(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx,
5480126a5af5SCornelia Huck 			    gpa_t addr, int len, const void *val, long cookie)
5481126a5af5SCornelia Huck {
5482e32edf4fSNikolay Nikolaev 	struct kvm_io_bus *bus;
5483e32edf4fSNikolay Nikolaev 	struct kvm_io_range range;
5484126a5af5SCornelia Huck 
5485126a5af5SCornelia Huck 	range = (struct kvm_io_range) {
5486126a5af5SCornelia Huck 		.addr = addr,
5487126a5af5SCornelia Huck 		.len = len,
5488743eeb0bSSasha Levin 	};
5489743eeb0bSSasha Levin 
5490743eeb0bSSasha Levin 	bus = srcu_dereference(vcpu->kvm->buses[bus_idx], &vcpu->kvm->srcu);
5491743eeb0bSSasha Levin 	if (!bus)
5492c21fbff1SPaolo Bonzini 		return -ENOMEM;
5493e32edf4fSNikolay Nikolaev 
5494126a5af5SCornelia Huck 	/* First try the device referenced by cookie. */
5495126a5af5SCornelia Huck 	if ((cookie >= 0) && (cookie < bus->dev_count) &&
5496743eeb0bSSasha Levin 	    (kvm_io_bus_cmp(&range, &bus->range[cookie]) == 0))
5497743eeb0bSSasha Levin 		if (!kvm_iodevice_write(vcpu, bus->range[cookie].dev, addr, len,
5498743eeb0bSSasha Levin 					val))
5499bda9020eSMichael S. Tsirkin 			return cookie;
55000fce5623SAvi Kivity 
55010fce5623SAvi Kivity 	/*
5502bda9020eSMichael S. Tsirkin 	 * cookie contained garbage; fall back to search and return the
5503e32edf4fSNikolay Nikolaev 	 * correct cookie value.
5504e93f8a0fSMarcelo Tosatti 	 */
5505bda9020eSMichael S. Tsirkin 	return __kvm_io_bus_write(vcpu, bus, &range, val);
5506126a5af5SCornelia Huck }
5507126a5af5SCornelia Huck 
5508126a5af5SCornelia Huck static int __kvm_io_bus_read(struct kvm_vcpu *vcpu, struct kvm_io_bus *bus,
5509126a5af5SCornelia Huck 			     struct kvm_io_range *range, void *val)
5510126a5af5SCornelia Huck {
5511126a5af5SCornelia Huck 	int idx;
5512126a5af5SCornelia Huck 
5513126a5af5SCornelia Huck 	idx = kvm_io_bus_get_first_dev(bus, range->addr, range->len);
5514126a5af5SCornelia Huck 	if (idx < 0)
5515e32edf4fSNikolay Nikolaev 		return -EOPNOTSUPP;
551690db1043SDavid Hildenbrand 
551790db1043SDavid Hildenbrand 	while (idx < bus->dev_count &&
5518e32edf4fSNikolay Nikolaev 		kvm_io_bus_cmp(range, &bus->range[idx]) == 0) {
5519126a5af5SCornelia Huck 		if (!kvm_iodevice_read(vcpu, bus->range[idx].dev, range->addr,
5520126a5af5SCornelia Huck 				       range->len, val))
5521126a5af5SCornelia Huck 			return idx;
552279fac95eSMarcelo Tosatti 		idx++;
5523743eeb0bSSasha Levin 	}
5524743eeb0bSSasha Levin 
5525090b7affSGregory Haskins 	return -EOPNOTSUPP;
5526d4c67a7aSGal Hammer }
5527e93f8a0fSMarcelo Tosatti 
5528d4c67a7aSGal Hammer /* kvm_io_bus_read - called under kvm->slots_lock */
5529090b7affSGregory Haskins int kvm_io_bus_read(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx, gpa_t addr,
55304a12f951SChristian Borntraeger 		    int len, void *val)
553190db1043SDavid Hildenbrand {
553290db1043SDavid Hildenbrand 	struct kvm_io_bus *bus;
553390db1043SDavid Hildenbrand 	struct kvm_io_range range;
55346ea34c9bSAmos Kong 	int r;
55356ea34c9bSAmos Kong 
5536090b7affSGregory Haskins 	range = (struct kvm_io_range) {
5537090b7affSGregory Haskins 		.addr = addr,
553890952cd3SGustavo A. R. Silva 		.len = len,
5539b12ce36aSBen Gardon 	};
5540e93f8a0fSMarcelo Tosatti 
5541e93f8a0fSMarcelo Tosatti 	bus = srcu_dereference(vcpu->kvm->buses[bus_idx], &vcpu->kvm->srcu);
5542d4c67a7aSGal Hammer 	if (!bus)
5543d4c67a7aSGal Hammer 		return -ENOMEM;
5544d4c67a7aSGal Hammer 	r = __kvm_io_bus_read(vcpu, bus, &range, val);
5545d4c67a7aSGal Hammer 	return r < 0 ? r : 0;
5546d4c67a7aSGal Hammer }
5547d4c67a7aSGal Hammer 
5548d4c67a7aSGal Hammer /* Caller must hold slots_lock. */
5549d4c67a7aSGal Hammer int kvm_io_bus_register_dev(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr,
5550d4c67a7aSGal Hammer 			    int len, struct kvm_io_device *dev)
5551d4c67a7aSGal Hammer {
5552d4c67a7aSGal Hammer 	int i;
5553d4c67a7aSGal Hammer 	struct kvm_io_bus *new_bus, *bus;
5554d4c67a7aSGal Hammer 	struct kvm_io_range range;
5555d4c67a7aSGal Hammer 
5556d4c67a7aSGal Hammer 	bus = kvm_get_bus(kvm, bus_idx);
5557d4c67a7aSGal Hammer 	if (!bus)
5558e93f8a0fSMarcelo Tosatti 		return -ENOMEM;
5559e93f8a0fSMarcelo Tosatti 
5560e93f8a0fSMarcelo Tosatti 	/* exclude ioeventfd which is limited by maximum fd */
5561090b7affSGregory Haskins 	if (bus->dev_count - bus->ioeventfd_count > NR_IOBUS_DEVS - 1)
5562090b7affSGregory Haskins 		return -ENOSPC;
5563090b7affSGregory Haskins 
5564090b7affSGregory Haskins 	new_bus = kmalloc(struct_size(bus, range, bus->dev_count + 1),
55655d3c4c79SSean Christopherson 			  GFP_KERNEL_ACCOUNT);
55666c474694SMichael S. Tsirkin 	if (!new_bus)
55676c474694SMichael S. Tsirkin 		return -ENOMEM;
55685ea5ca3cSWei Wang 
5569e93f8a0fSMarcelo Tosatti 	range = (struct kvm_io_range) {
55706c474694SMichael S. Tsirkin 		.addr = addr,
55717c896d37SSean Christopherson 		.len = len,
55727c896d37SSean Christopherson 		.dev = dev,
55734a12f951SChristian Borntraeger 	};
5574df630b8cSPeter Xu 
55755d3c4c79SSean Christopherson 	for (i = 0; i < bus->dev_count; i++)
5576df630b8cSPeter Xu 		if (kvm_io_bus_cmp(&bus->range[i], &range) > 0)
55777c896d37SSean Christopherson 			break;
5578a1300716SAmos Kong 
5579090b7affSGregory Haskins 	memcpy(new_bus, bus, sizeof(*bus) + i * sizeof(struct kvm_io_range));
5580090b7affSGregory Haskins 	new_bus->dev_count++;
55817c896d37SSean Christopherson 	new_bus->range[i] = range;
5582e93f8a0fSMarcelo Tosatti 	memcpy(new_bus->range + i + 1, bus->range + i,
558390db1043SDavid Hildenbrand 		(bus->dev_count - i) * sizeof(struct kvm_io_range));
55845d3c4c79SSean Christopherson 	rcu_assign_pointer(kvm->buses[bus_idx], new_bus);
5585a1300716SAmos Kong 	synchronize_srcu_expedited(&kvm->srcu);
558690952cd3SGustavo A. R. Silva 	kfree(bus);
5587b12ce36aSBen Gardon 
5588f6588660SRustam Kovhaev 	return 0;
5589871c433bSRustam Kovhaev }
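/*
 * Minimal sketch (not part of this file): registering a write-only device
 * on the MMIO bus.  The ops, handler and helper names are hypothetical;
 * in-tree users such as coalesced MMIO and ioeventfd follow this pattern.
 */
static int example_dev_write(struct kvm_vcpu *vcpu, struct kvm_io_device *dev,
			     gpa_t addr, int len, const void *val)
{
	return 0;	/* accept and discard the write */
}

static const struct kvm_io_device_ops example_dev_ops = {
	.write = example_dev_write,
};

static int example_register_dev(struct kvm *kvm, struct kvm_io_device *dev,
				gpa_t base, int len)
{
	int ret;

	kvm_iodevice_init(dev, &example_dev_ops);

	mutex_lock(&kvm->slots_lock);	/* required by kvm_io_bus_register_dev() */
	ret = kvm_io_bus_register_dev(kvm, KVM_MMIO_BUS, base, len, dev);
	mutex_unlock(&kvm->slots_lock);

	return ret;
}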
5590a1300716SAmos Kong 
5591a1300716SAmos Kong int kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx,
5592871c433bSRustam Kovhaev 			      struct kvm_io_device *dev)
55932ee37574SSean Christopherson {
55942ee37574SSean Christopherson 	int i;
55952ee37574SSean Christopherson 	struct kvm_io_bus *new_bus, *bus;
55962ee37574SSean Christopherson 
55972ee37574SSean Christopherson 	lockdep_assert_held(&kvm->slots_lock);
55985ea5ca3cSWei Wang 
55995ea5ca3cSWei Wang 	bus = kvm_get_bus(kvm, bus_idx);
56005ea5ca3cSWei Wang 	if (!bus)
56015ea5ca3cSWei Wang 		return 0;
56022ee37574SSean Christopherson 
5603f6588660SRustam Kovhaev 	for (i = 0; i < bus->dev_count; i++) {
56045ea5ca3cSWei Wang 		if (bus->range[i].dev == dev) {
56055ea5ca3cSWei Wang 			break;
5606f6588660SRustam Kovhaev 		}
5607e93f8a0fSMarcelo Tosatti 	}
56085ea5ca3cSWei Wang 
5609e93f8a0fSMarcelo Tosatti 	if (i == bus->dev_count)
56105ea5ca3cSWei Wang 		return 0;
56110fce5623SAvi Kivity 
56120fce5623SAvi Kivity 	new_bus = kmalloc(struct_size(bus, range, bus->dev_count - 1),
56138a39d006SAndre Przywara 			  GFP_KERNEL_ACCOUNT);
56148a39d006SAndre Przywara 	if (new_bus) {
56158a39d006SAndre Przywara 		memcpy(new_bus, bus, struct_size(bus, range, i));
56168a39d006SAndre Przywara 		new_bus->dev_count--;
56178a39d006SAndre Przywara 		memcpy(new_bus->range + i, bus->range + i + 1,
56188a39d006SAndre Przywara 				flex_array_size(new_bus, range, new_bus->dev_count - i));
56198a39d006SAndre Przywara 	}
56208a39d006SAndre Przywara 
56218a39d006SAndre Przywara 	rcu_assign_pointer(kvm->buses[bus_idx], new_bus);
56228a39d006SAndre Przywara 	synchronize_srcu_expedited(&kvm->srcu);
562390db1043SDavid Hildenbrand 
562490db1043SDavid Hildenbrand 	/*
56258a39d006SAndre Przywara 	 * If NULL bus is installed, destroy the old bus, including all the
56268a39d006SAndre Przywara 	 * attached devices. Otherwise, destroy the caller's device only.
56278a39d006SAndre Przywara 	 */
56288a39d006SAndre Przywara 	if (!new_bus) {
56298a39d006SAndre Przywara 		pr_err("kvm: failed to shrink bus, removing it completely\n");
56308a39d006SAndre Przywara 		kvm_io_bus_destroy(bus);
56318a39d006SAndre Przywara 		return -ENOMEM;
56328a39d006SAndre Przywara 	}
56338a39d006SAndre Przywara 
56348a39d006SAndre Przywara 	kvm_iodevice_destructor(dev);
56358a39d006SAndre Przywara 	kfree(bus);
56368a39d006SAndre Przywara 	return 0;
56378a39d006SAndre Przywara }
56388a39d006SAndre Przywara 
5639536a6f88SJanosch Frank struct kvm_io_device *kvm_io_bus_get_dev(struct kvm *kvm, enum kvm_bus bus_idx,
5640536a6f88SJanosch Frank 					 gpa_t addr)
5641536a6f88SJanosch Frank {
5642536a6f88SJanosch Frank 	struct kvm_io_bus *bus;
5643180418e2SHou Wenlong 	int dev_idx, srcu_idx;
564414aa40a1SLi kunyu 	struct kvm_io_device *iodev = NULL;
5645536a6f88SJanosch Frank 
5646605c7130SPeter Xu 	srcu_idx = srcu_read_lock(&kvm->srcu);
5647605c7130SPeter Xu 
5648605c7130SPeter Xu 	bus = srcu_dereference(kvm->buses[bus_idx], &kvm->srcu);
5649605c7130SPeter Xu 	if (!bus)
5650536a6f88SJanosch Frank 		goto out_unlock;
5651605c7130SPeter Xu 
5652536a6f88SJanosch Frank 	dev_idx = kvm_io_bus_get_first_dev(bus, addr, 1);
5653536a6f88SJanosch Frank 	if (dev_idx < 0)
5654180418e2SHou Wenlong 		goto out_unlock;
5655bc9e9e67SJing Zhang 
5656180418e2SHou Wenlong 	iodev = bus->range[dev_idx].dev;
5657180418e2SHou Wenlong 
5658536a6f88SJanosch Frank out_unlock:
5659536a6f88SJanosch Frank 	srcu_read_unlock(&kvm->srcu, srcu_idx);
5660180418e2SHou Wenlong 
5661536a6f88SJanosch Frank 	return iodev;
5662536a6f88SJanosch Frank }
5663536a6f88SJanosch Frank EXPORT_SYMBOL_GPL(kvm_io_bus_get_dev);
5664536a6f88SJanosch Frank 
566514aa40a1SLi kunyu static int kvm_debugfs_open(struct inode *inode, struct file *file,
5666536a6f88SJanosch Frank 			   int (*get)(void *, u64 *), int (*set)(void *, u64),
5667536a6f88SJanosch Frank 			   const char *fmt)
5668536a6f88SJanosch Frank {
5669536a6f88SJanosch Frank 	int ret;
5670536a6f88SJanosch Frank 	struct kvm_stat_data *stat_data = inode->i_private;
5671536a6f88SJanosch Frank 
5672536a6f88SJanosch Frank 	/*
567309cbcef6SMilan Pandurov 	 * The debugfs files are a reference to the kvm struct which
5674536a6f88SJanosch Frank 	 * is still valid when kvm_destroy_vm is called.  kvm_get_kvm_safe
5675bc9e9e67SJing Zhang 	 * avoids the race between open and the removal of the debugfs directory.
5676536a6f88SJanosch Frank 	 */
5677536a6f88SJanosch Frank 	if (!kvm_get_kvm_safe(stat_data->kvm))
5678536a6f88SJanosch Frank 		return -ENOENT;
5679536a6f88SJanosch Frank 
568009cbcef6SMilan Pandurov 	ret = simple_attr_open(inode, file, get,
5681ce35ef27SSuraj Jitindar Singh 			       kvm_stats_debugfs_mode(stat_data->desc) & 0222
5682bc9e9e67SJing Zhang 			       ? set : NULL, fmt);
5683ce35ef27SSuraj Jitindar Singh 	if (ret)
5684ce35ef27SSuraj Jitindar Singh 		kvm_put_kvm(stat_data->kvm);
5685ce35ef27SSuraj Jitindar Singh 
5686ce35ef27SSuraj Jitindar Singh 	return ret;
568709cbcef6SMilan Pandurov }
5688536a6f88SJanosch Frank 
568946808a4cSMarc Zyngier static int kvm_debugfs_release(struct inode *inode, struct file *file)
5690536a6f88SJanosch Frank {
5691536a6f88SJanosch Frank 	struct kvm_stat_data *stat_data = inode->i_private;
5692536a6f88SJanosch Frank 
5693536a6f88SJanosch Frank 	simple_attr_release(inode, file);
569409cbcef6SMilan Pandurov 	kvm_put_kvm(stat_data->kvm);
5695bc9e9e67SJing Zhang 
5696536a6f88SJanosch Frank 	return 0;
5697536a6f88SJanosch Frank }
5698536a6f88SJanosch Frank 
5699536a6f88SJanosch Frank static int kvm_get_stat_per_vm(struct kvm *kvm, size_t offset, u64 *val)
570009cbcef6SMilan Pandurov {
5701ce35ef27SSuraj Jitindar Singh 	*val = *(u64 *)((void *)(&kvm->stat) + offset);
570246808a4cSMarc Zyngier 
5703ce35ef27SSuraj Jitindar Singh 	return 0;
5704ce35ef27SSuraj Jitindar Singh }
570509cbcef6SMilan Pandurov 
5706bc9e9e67SJing Zhang static int kvm_clear_stat_per_vm(struct kvm *kvm, size_t offset)
570709cbcef6SMilan Pandurov {
570809cbcef6SMilan Pandurov 	*(u64 *)((void *)(&kvm->stat) + offset) = 0;
570909cbcef6SMilan Pandurov 
571009cbcef6SMilan Pandurov 	return 0;
571109cbcef6SMilan Pandurov }
571209cbcef6SMilan Pandurov 
571309cbcef6SMilan Pandurov static int kvm_get_stat_per_vcpu(struct kvm *kvm, size_t offset, u64 *val)
571414aa40a1SLi kunyu {
571509cbcef6SMilan Pandurov 	unsigned long i;
5716bc9e9e67SJing Zhang 	struct kvm_vcpu *vcpu;
571709cbcef6SMilan Pandurov 
571809cbcef6SMilan Pandurov 	*val = 0;
5719bc9e9e67SJing Zhang 
572009cbcef6SMilan Pandurov 	kvm_for_each_vcpu(i, vcpu, kvm)
572109cbcef6SMilan Pandurov 		*val += *(u64 *)((void *)(&vcpu->stat) + offset);
572209cbcef6SMilan Pandurov 
5723bc9e9e67SJing Zhang 	return 0;
572409cbcef6SMilan Pandurov }
572509cbcef6SMilan Pandurov 
572609cbcef6SMilan Pandurov static int kvm_clear_stat_per_vcpu(struct kvm *kvm, size_t offset)
572709cbcef6SMilan Pandurov {
572809cbcef6SMilan Pandurov 	unsigned long i;
572909cbcef6SMilan Pandurov 	struct kvm_vcpu *vcpu;
573009cbcef6SMilan Pandurov 
573109cbcef6SMilan Pandurov 	kvm_for_each_vcpu(i, vcpu, kvm)
573209cbcef6SMilan Pandurov 		*(u64 *)((void *)(&vcpu->stat) + offset) = 0;
573314aa40a1SLi kunyu 
573409cbcef6SMilan Pandurov 	return 0;
5735ce35ef27SSuraj Jitindar Singh }
5736ce35ef27SSuraj Jitindar Singh 
5737ce35ef27SSuraj Jitindar Singh static int kvm_stat_data_get(void *data, u64 *val)
5738bc9e9e67SJing Zhang {
573909cbcef6SMilan Pandurov 	int r = -EFAULT;
574009cbcef6SMilan Pandurov 	struct kvm_stat_data *stat_data = data;
5741bc9e9e67SJing Zhang 
574209cbcef6SMilan Pandurov 	switch (stat_data->kind) {
574309cbcef6SMilan Pandurov 	case KVM_STAT_VM:
574409cbcef6SMilan Pandurov 		r = kvm_get_stat_per_vm(stat_data->kvm,
5745bc9e9e67SJing Zhang 					stat_data->desc->desc.offset, val);
574609cbcef6SMilan Pandurov 		break;
5747ce35ef27SSuraj Jitindar Singh 	case KVM_STAT_VCPU:
5748ce35ef27SSuraj Jitindar Singh 		r = kvm_get_stat_per_vcpu(stat_data->kvm,
574909cbcef6SMilan Pandurov 					  stat_data->desc->desc.offset, val);
575009cbcef6SMilan Pandurov 		break;
575109cbcef6SMilan Pandurov 	}
575209cbcef6SMilan Pandurov 
5753536a6f88SJanosch Frank 	return r;
5754536a6f88SJanosch Frank }
575509cbcef6SMilan Pandurov 
575609cbcef6SMilan Pandurov static int kvm_stat_data_clear(void *data, u64 val)
5757536a6f88SJanosch Frank {
5758536a6f88SJanosch Frank 	int r = -EFAULT;
575909cbcef6SMilan Pandurov 	struct kvm_stat_data *stat_data = data;
5760536a6f88SJanosch Frank 
576109cbcef6SMilan Pandurov 	if (val)
5762536a6f88SJanosch Frank 		return -EINVAL;
5763536a6f88SJanosch Frank 
5764536a6f88SJanosch Frank 	switch (stat_data->kind) {
57653bed8888SGeliang Tang 	case KVM_STAT_VM:
5766536a6f88SJanosch Frank 		r = kvm_clear_stat_per_vm(stat_data->kvm,
5767536a6f88SJanosch Frank 					  stat_data->desc->desc.offset);
57688b88b099SChristoph Hellwig 		break;
57690fce5623SAvi Kivity 	case KVM_STAT_VCPU:
57700fce5623SAvi Kivity 		r = kvm_clear_stat_per_vcpu(stat_data->kvm,
57710fce5623SAvi Kivity 					    stat_data->desc->desc.offset);
5772536a6f88SJanosch Frank 		break;
57730fce5623SAvi Kivity 	}
57748b88b099SChristoph Hellwig 
57750d9ce162SJunaid Shahid 	return r;
5776536a6f88SJanosch Frank }
577709cbcef6SMilan Pandurov 
5778536a6f88SJanosch Frank static int kvm_stat_data_open(struct inode *inode, struct file *file)
5779536a6f88SJanosch Frank {
57800d9ce162SJunaid Shahid 	__simple_attr_check_format("%llu\n", 0ull);
57818b88b099SChristoph Hellwig 	return kvm_debugfs_open(inode, file, kvm_stat_data_get,
57820fce5623SAvi Kivity 				kvm_stat_data_clear, "%llu\n");
57830fce5623SAvi Kivity }
5784ce35ef27SSuraj Jitindar Singh 
5785ce35ef27SSuraj Jitindar Singh static const struct file_operations stat_fops_per_vm = {
5786ce35ef27SSuraj Jitindar Singh 	.owner = THIS_MODULE,
5787ce35ef27SSuraj Jitindar Singh 	.open = kvm_stat_data_open,
5788ce35ef27SSuraj Jitindar Singh 	.release = kvm_debugfs_release,
5789ce35ef27SSuraj Jitindar Singh 	.read = simple_attr_read,
5790ce35ef27SSuraj Jitindar Singh 	.write = simple_attr_write,
5791ce35ef27SSuraj Jitindar Singh 	.llseek = no_llseek,
57920d9ce162SJunaid Shahid };
5793ce35ef27SSuraj Jitindar Singh 
579409cbcef6SMilan Pandurov static int vm_stat_get(void *_offset, u64 *val)
5795ce35ef27SSuraj Jitindar Singh {
57960d9ce162SJunaid Shahid 	unsigned offset = (long)_offset;
5797ce35ef27SSuraj Jitindar Singh 	struct kvm *kvm;
5798ce35ef27SSuraj Jitindar Singh 	u64 tmp_val;
5799ce35ef27SSuraj Jitindar Singh 
5800ce35ef27SSuraj Jitindar Singh 	*val = 0;
5801ce35ef27SSuraj Jitindar Singh 	mutex_lock(&kvm_lock);
5802bc9e9e67SJing Zhang 	list_for_each_entry(kvm, &vm_list, vm_list) {
58030fce5623SAvi Kivity 		kvm_get_stat_per_vm(kvm, offset, &tmp_val);
58048b88b099SChristoph Hellwig 		*val += tmp_val;
58050fce5623SAvi Kivity 	}
58060fce5623SAvi Kivity 	mutex_unlock(&kvm_lock);
58070fce5623SAvi Kivity 	return 0;
5808536a6f88SJanosch Frank }
58090fce5623SAvi Kivity 
58108b88b099SChristoph Hellwig static int vm_stat_clear(void *_offset, u64 val)
58110d9ce162SJunaid Shahid {
5812536a6f88SJanosch Frank 	unsigned offset = (long)_offset;
581309cbcef6SMilan Pandurov 	struct kvm *kvm;
5814536a6f88SJanosch Frank 
5815536a6f88SJanosch Frank 	if (val)
58160d9ce162SJunaid Shahid 		return -EINVAL;
58178b88b099SChristoph Hellwig 
58180fce5623SAvi Kivity 	mutex_lock(&kvm_lock);
58190fce5623SAvi Kivity 	list_for_each_entry(kvm, &vm_list, vm_list) {
5820ce35ef27SSuraj Jitindar Singh 		kvm_clear_stat_per_vm(kvm, offset);
5821ce35ef27SSuraj Jitindar Singh 	}
5822ce35ef27SSuraj Jitindar Singh 	mutex_unlock(&kvm_lock);
5823ce35ef27SSuraj Jitindar Singh 
5824ce35ef27SSuraj Jitindar Singh 	return 0;
5825ce35ef27SSuraj Jitindar Singh }
5826ce35ef27SSuraj Jitindar Singh 
5827ce35ef27SSuraj Jitindar Singh DEFINE_SIMPLE_ATTRIBUTE(vm_stat_fops, vm_stat_get, vm_stat_clear, "%llu\n");
58280d9ce162SJunaid Shahid DEFINE_SIMPLE_ATTRIBUTE(vm_stat_readonly_fops, vm_stat_get, NULL, "%llu\n");
5829ce35ef27SSuraj Jitindar Singh 
583009cbcef6SMilan Pandurov static int vcpu_stat_get(void *_offset, u64 *val)
5831ce35ef27SSuraj Jitindar Singh {
58320d9ce162SJunaid Shahid 	unsigned offset = (long)_offset;
5833ce35ef27SSuraj Jitindar Singh 	struct kvm *kvm;
5834ce35ef27SSuraj Jitindar Singh 	u64 tmp_val;
5835ce35ef27SSuraj Jitindar Singh 
5836ce35ef27SSuraj Jitindar Singh 	*val = 0;
5837ce35ef27SSuraj Jitindar Singh 	mutex_lock(&kvm_lock);
5838ce35ef27SSuraj Jitindar Singh 	list_for_each_entry(kvm, &vm_list, vm_list) {
5839bc9e9e67SJing Zhang 		kvm_get_stat_per_vcpu(kvm, offset, &tmp_val);
58400fce5623SAvi Kivity 		*val += tmp_val;
5841286de8f6SClaudio Imbrenda 	}
5842286de8f6SClaudio Imbrenda 	mutex_unlock(&kvm_lock);
5843286de8f6SClaudio Imbrenda 	return 0;
5844286de8f6SClaudio Imbrenda }
5845286de8f6SClaudio Imbrenda 
5846286de8f6SClaudio Imbrenda static int vcpu_stat_clear(void *_offset, u64 val)
5847286de8f6SClaudio Imbrenda {
5848286de8f6SClaudio Imbrenda 	unsigned offset = (long)_offset;
58490d9ce162SJunaid Shahid 	struct kvm *kvm;
5850286de8f6SClaudio Imbrenda 
5851286de8f6SClaudio Imbrenda 	if (val)
5852286de8f6SClaudio Imbrenda 		return -EINVAL;
5853286de8f6SClaudio Imbrenda 
5854286de8f6SClaudio Imbrenda 	mutex_lock(&kvm_lock);
5855286de8f6SClaudio Imbrenda 	list_for_each_entry(kvm, &vm_list, vm_list) {
5856286de8f6SClaudio Imbrenda 		kvm_clear_stat_per_vcpu(kvm, offset);
5857286de8f6SClaudio Imbrenda 	}
58580d9ce162SJunaid Shahid 	mutex_unlock(&kvm_lock);
5859286de8f6SClaudio Imbrenda 
5860b12ce36aSBen Gardon 	return 0;
5861286de8f6SClaudio Imbrenda }
5862286de8f6SClaudio Imbrenda 
5863286de8f6SClaudio Imbrenda DEFINE_SIMPLE_ATTRIBUTE(vcpu_stat_fops, vcpu_stat_get, vcpu_stat_clear,
5864286de8f6SClaudio Imbrenda 			"%llu\n");
5865286de8f6SClaudio Imbrenda DEFINE_SIMPLE_ATTRIBUTE(vcpu_stat_readonly_fops, vcpu_stat_get, NULL, "%llu\n");
5866286de8f6SClaudio Imbrenda 
5867fdeaf7e3SClaudio Imbrenda static void kvm_uevent_notify_change(unsigned int type, struct kvm *kvm)
5868286de8f6SClaudio Imbrenda {
5869fdeaf7e3SClaudio Imbrenda 	struct kobj_uevent_env *env;
5870fdeaf7e3SClaudio Imbrenda 	unsigned long long created, active;
5871286de8f6SClaudio Imbrenda 
5872fdeaf7e3SClaudio Imbrenda 	if (!kvm_dev.this_device || !kvm)
5873fdeaf7e3SClaudio Imbrenda 		return;
5874286de8f6SClaudio Imbrenda 
5875a44a4cc1SOliver Upton 	mutex_lock(&kvm_lock);
5876b12ce36aSBen Gardon 	if (type == KVM_EVENT_CREATE_VM) {
5877286de8f6SClaudio Imbrenda 		kvm_createvm_count++;
5878fdeaf7e3SClaudio Imbrenda 		kvm_active_vms++;
5879fdeaf7e3SClaudio Imbrenda 	} else if (type == KVM_EVENT_DESTROY_VM) {
5880fdeaf7e3SClaudio Imbrenda 		kvm_active_vms--;
5881fdeaf7e3SClaudio Imbrenda 	}
5882fdeaf7e3SClaudio Imbrenda 	created = kvm_createvm_count;
5883286de8f6SClaudio Imbrenda 	active = kvm_active_vms;
5884286de8f6SClaudio Imbrenda 	mutex_unlock(&kvm_lock);
5885286de8f6SClaudio Imbrenda 
5886286de8f6SClaudio Imbrenda 	env = kzalloc(sizeof(*env), GFP_KERNEL_ACCOUNT);
5887286de8f6SClaudio Imbrenda 	if (!env)
5888286de8f6SClaudio Imbrenda 		return;
5889286de8f6SClaudio Imbrenda 
5890286de8f6SClaudio Imbrenda 	add_uevent_var(env, "CREATED=%llu", created);
5891929f45e3SGreg Kroah-Hartman 	add_uevent_var(env, "COUNT=%llu", active);
58920fce5623SAvi Kivity 
5893bc9e9e67SJing Zhang 	if (type == KVM_EVENT_CREATE_VM) {
5894bc9e9e67SJing Zhang 		add_uevent_var(env, "EVENT=create");
5895bc9e9e67SJing Zhang 		kvm->userspace_pid = task_pid_nr(current);
58960fce5623SAvi Kivity 	} else if (type == KVM_EVENT_DESTROY_VM) {
589776f7c879SHollis Blanchard 		add_uevent_var(env, "EVENT=destroy");
58984f69b680SHamo 	}
5899bc9e9e67SJing Zhang 	add_uevent_var(env, "PID=%d", kvm->userspace_pid);
5900bc9e9e67SJing Zhang 
5901bc9e9e67SJing Zhang 	if (!IS_ERR(kvm->debugfs_dentry)) {
5902bc9e9e67SJing Zhang 		char *tmp, *p = kmalloc(PATH_MAX, GFP_KERNEL_ACCOUNT);
5903bc9e9e67SJing Zhang 
5904bc9e9e67SJing Zhang 		if (p) {
5905bc9e9e67SJing Zhang 			tmp = dentry_path_raw(kvm->debugfs_dentry, p, PATH_MAX);
5906bc9e9e67SJing Zhang 			if (!IS_ERR(tmp))
5907bc9e9e67SJing Zhang 				add_uevent_var(env, "STATS_PATH=%s", tmp);
5908bc9e9e67SJing Zhang 			kfree(p);
5909bc9e9e67SJing Zhang 		}
5910bc9e9e67SJing Zhang 	}
5911bc9e9e67SJing Zhang 	/* no need for checks, since we are adding at most only 5 keys */
5912bc9e9e67SJing Zhang 	env->envp[env->envp_idx++] = NULL;
5913bc9e9e67SJing Zhang 	kobject_uevent_env(&kvm_dev.this_device->kobj, KOBJ_CHANGE, env->envp);
5914bc9e9e67SJing Zhang 	kfree(env);
5915bc9e9e67SJing Zhang }
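/*
 * Example uevent payload (illustrative values) emitted above on VM
 * creation, in the order the variables are added:
 *
 *   CREATED=42
 *   COUNT=3
 *   EVENT=create
 *   PID=1234
 *   STATS_PATH=/kvm/1234-11
 *
 * STATS_PATH is the VM's debugfs directory as resolved by
 * dentry_path_raw(), i.e. relative to the debugfs root, and is only added
 * when the per-VM debugfs directory was created successfully.
 */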
5916bc9e9e67SJing Zhang 
5917bc9e9e67SJing Zhang static void kvm_init_debug(void)
5918bc9e9e67SJing Zhang {
59194f69b680SHamo 	const struct file_operations *fops;
59200fce5623SAvi Kivity 	const struct _kvm_stats_desc *pdesc;
59210fce5623SAvi Kivity 	int i;
59220fce5623SAvi Kivity 
59230fce5623SAvi Kivity 	kvm_debugfs_dir = debugfs_create_dir("kvm", NULL);
59240fce5623SAvi Kivity 
59250fce5623SAvi Kivity 	for (i = 0; i < kvm_vm_stats_header.num_desc; ++i) {
59260fce5623SAvi Kivity 		pdesc = &kvm_vm_stats_desc[i];
59270fce5623SAvi Kivity 		if (kvm_stats_debugfs_mode(pdesc) & 0222)
59280fce5623SAvi Kivity 			fops = &vm_stat_fops;
59290fce5623SAvi Kivity 		else
59300fce5623SAvi Kivity 			fops = &vm_stat_readonly_fops;
5931f95ef0cdSXiubo Li 		debugfs_create_file(pdesc->name, kvm_stats_debugfs_mode(pdesc),
5932046ddeedSWanpeng Li 				kvm_debugfs_dir,
5933d73eb57bSWanpeng Li 				(void *)(long)pdesc->desc.offset, fops);
59340fce5623SAvi Kivity 	}
59357495e22bSPaolo Bonzini 
5936e790d9efSRadim Krčmář 	for (i = 0; i < kvm_vcpu_stats_header.num_desc; ++i) {
59370fce5623SAvi Kivity 		pdesc = &kvm_vcpu_stats_desc[i];
59380fce5623SAvi Kivity 		if (kvm_stats_debugfs_mode(pdesc) & 0222)
59390fce5623SAvi Kivity 			fops = &vcpu_stat_fops;
59400fce5623SAvi Kivity 		else
59410fce5623SAvi Kivity 			fops = &vcpu_stat_readonly_fops;
59420fce5623SAvi Kivity 		debugfs_create_file(pdesc->name, kvm_stats_debugfs_mode(pdesc),
59430fce5623SAvi Kivity 				kvm_debugfs_dir,
59440fce5623SAvi Kivity 				(void *)(long)pdesc->desc.offset, fops);
59453ba9f93bSPeter Zijlstra 	}
5946046ddeedSWanpeng Li }
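/*
 * Resulting layout (illustrative): each descriptor above becomes a file
 * directly under the "kvm" debugfs directory (typically
 * /sys/kernel/debug/kvm/), reporting the stat summed over all VMs via the
 * vm_stat/vcpu_stat handlers; writable entries can also be cleared.
 * Per-VM copies of the same stats, backed by stat_fops_per_vm, live in
 * per-VM subdirectories created elsewhere in this file.
 */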
5947d73eb57bSWanpeng Li 
5948d73eb57bSWanpeng Li static inline
59490fce5623SAvi Kivity struct kvm_vcpu *preempt_notifier_to_vcpu(struct preempt_notifier *pn)
59507495e22bSPaolo Bonzini {
59517495e22bSPaolo Bonzini 	return container_of(pn, struct kvm_vcpu, preempt_notifier);
59527495e22bSPaolo Bonzini }
59537495e22bSPaolo Bonzini 
59541f03b2bcSMarc Zyngier static void kvm_sched_in(struct preempt_notifier *pn, int cpu)
59551f03b2bcSMarc Zyngier {
59561f03b2bcSMarc Zyngier 	struct kvm_vcpu *vcpu = preempt_notifier_to_vcpu(pn);
59571f03b2bcSMarc Zyngier 
59581f03b2bcSMarc Zyngier 	WRITE_ONCE(vcpu->preempted, false);
59591f03b2bcSMarc Zyngier 	WRITE_ONCE(vcpu->ready, false);
59601f03b2bcSMarc Zyngier 
59617495e22bSPaolo Bonzini 	__this_cpu_write(kvm_running_vcpu, vcpu);
59627495e22bSPaolo Bonzini 	kvm_arch_sched_in(vcpu, cpu);
59637495e22bSPaolo Bonzini 	kvm_arch_vcpu_load(vcpu, cpu);
59641f03b2bcSMarc Zyngier }
59651f03b2bcSMarc Zyngier 
59661f03b2bcSMarc Zyngier static void kvm_sched_out(struct preempt_notifier *pn,
59671f03b2bcSMarc Zyngier 			  struct task_struct *next)
59681f03b2bcSMarc Zyngier {
59691f03b2bcSMarc Zyngier 	struct kvm_vcpu *vcpu = preempt_notifier_to_vcpu(pn);
59701f03b2bcSMarc Zyngier 
59717495e22bSPaolo Bonzini 	if (current->on_rq) {
5972379a3c8eSWanpeng Li 		WRITE_ONCE(vcpu->preempted, true);
59737495e22bSPaolo Bonzini 		WRITE_ONCE(vcpu->ready, true);
59747495e22bSPaolo Bonzini 	}
59757495e22bSPaolo Bonzini 	kvm_arch_vcpu_put(vcpu);
59767495e22bSPaolo Bonzini 	__this_cpu_write(kvm_running_vcpu, NULL);
59777495e22bSPaolo Bonzini }
59787495e22bSPaolo Bonzini 
59797495e22bSPaolo Bonzini /**
59800fce5623SAvi Kivity  * kvm_get_running_vcpu - get the vcpu running on the current CPU.
59810fce5623SAvi Kivity  *
5982e1bfc245SSean Christopherson  * We can disable preemption locally around accessing the per-CPU variable,
5983e1bfc245SSean Christopherson  * and use the resolved vcpu pointer after enabling preemption again,
5984e1bfc245SSean Christopherson  * because even if the current thread is migrated to another CPU, reading
5985e1bfc245SSean Christopherson  * the per-CPU value later will give us the same value as we update the
5986e1bfc245SSean Christopherson  * per-CPU variable in the preempt notifier handlers.
5987e1bfc245SSean Christopherson  */
5988e1bfc245SSean Christopherson struct kvm_vcpu *kvm_get_running_vcpu(void)
5989e1bfc245SSean Christopherson {
5990e1bfc245SSean Christopherson 	struct kvm_vcpu *vcpu;
5991e1bfc245SSean Christopherson 
5992e1bfc245SSean Christopherson 	preempt_disable();
5993e1bfc245SSean Christopherson 	vcpu = __this_cpu_read(kvm_running_vcpu);
5994e1bfc245SSean Christopherson 	preempt_enable();
5995e1bfc245SSean Christopherson 
5996e1bfc245SSean Christopherson 	return vcpu;
5997e1bfc245SSean Christopherson }
5998e1bfc245SSean Christopherson EXPORT_SYMBOL_GPL(kvm_get_running_vcpu);
5999e1bfc245SSean Christopherson 
6000e1bfc245SSean Christopherson /**
6001e1bfc245SSean Christopherson  * kvm_get_running_vcpus - get the per-CPU array of currently running vcpus.
6002e1bfc245SSean Christopherson  */
6003e1bfc245SSean Christopherson struct kvm_vcpu * __percpu *kvm_get_running_vcpus(void)
6004e1bfc245SSean Christopherson {
6005e1bfc245SSean Christopherson 	return &kvm_running_vcpu;
6006e1bfc245SSean Christopherson }
6007e1bfc245SSean Christopherson 
6008e1bfc245SSean Christopherson #ifdef CONFIG_GUEST_PERF_EVENTS
6009e1bfc245SSean Christopherson static unsigned int kvm_guest_state(void)
6010e1bfc245SSean Christopherson {
6011e1bfc245SSean Christopherson 	struct kvm_vcpu *vcpu = kvm_get_running_vcpu();
6012e1bfc245SSean Christopherson 	unsigned int state;
6013e1bfc245SSean Christopherson 
6014e1bfc245SSean Christopherson 	if (!kvm_arch_pmi_in_guest(vcpu))
6015e1bfc245SSean Christopherson 		return 0;
6016e1bfc245SSean Christopherson 
6017e1bfc245SSean Christopherson 	state = PERF_GUEST_ACTIVE;
6018e1bfc245SSean Christopherson 	if (!kvm_arch_vcpu_in_kernel(vcpu))
6019e1bfc245SSean Christopherson 		state |= PERF_GUEST_USER;
6020e1bfc245SSean Christopherson 
6021e1bfc245SSean Christopherson 	return state;
6022e1bfc245SSean Christopherson }
6023e1bfc245SSean Christopherson 
6024e1bfc245SSean Christopherson static unsigned long kvm_guest_get_ip(void)
6025e1bfc245SSean Christopherson {
602681a1cf9fSSean Christopherson 	struct kvm_vcpu *vcpu = kvm_get_running_vcpu();
6027f257d6dcSSean Christopherson 
60280fce5623SAvi Kivity 	/* Retrieving the IP must be guarded by a call to kvm_guest_state(). */
60290fce5623SAvi Kivity 	if (WARN_ON_ONCE(!kvm_arch_pmi_in_guest(vcpu)))
60300fce5623SAvi Kivity 		return 0;
6031441f7bfaSSean Christopherson 
6032aaf12a7bSChao Gao 	return kvm_arch_vcpu_get_ip(vcpu);
6033aaf12a7bSChao Gao }
60340fce5623SAvi Kivity 
603537d25881SSean Christopherson static struct perf_guest_info_callbacks kvm_guest_cbs = {
603637d25881SSean Christopherson 	.state			= kvm_guest_state,
603735774a9fSSean Christopherson 	.get_ip			= kvm_guest_get_ip,
6038441f7bfaSSean Christopherson 	.handle_intel_pt_intr	= NULL,
60390fce5623SAvi Kivity };
60400fce5623SAvi Kivity 
60410ee75beaSAvi Kivity void kvm_register_perf_callbacks(unsigned int (*pt_intr_handler)(void))
60420ee75beaSAvi Kivity {
604346515736SPaolo Bonzini 	kvm_guest_cbs.handle_intel_pt_intr = pt_intr_handler;
604446515736SPaolo Bonzini 	perf_register_guest_info_callbacks(&kvm_guest_cbs);
604546515736SPaolo Bonzini }
604646515736SPaolo Bonzini void kvm_unregister_perf_callbacks(void)
6047ce55c049SJing Zhang {
6048ce55c049SJing Zhang 	perf_unregister_guest_info_callbacks(&kvm_guest_cbs);
604946515736SPaolo Bonzini }
60500fce5623SAvi Kivity #endif
60510fce5623SAvi Kivity 
60529f1a4c00SSean Christopherson int kvm_init(unsigned vcpu_size, unsigned vcpu_align, struct module *module)
60530fce5623SAvi Kivity {
60540fce5623SAvi Kivity 	int r;
6055baff59ccSVitaly Kuznetsov 	int cpu;
6056baff59ccSVitaly Kuznetsov 
6057baff59ccSVitaly Kuznetsov #ifdef CONFIG_KVM_GENERIC_HARDWARE_ENABLING
6058baff59ccSVitaly Kuznetsov 	r = cpuhp_setup_state_nocalls(CPUHP_AP_KVM_ONLINE, "kvm/cpu:online",
60599f1a4c00SSean Christopherson 				      kvm_online_cpu, kvm_offline_cpu);
6060baff59ccSVitaly Kuznetsov 	if (r)
6061baff59ccSVitaly Kuznetsov 		return r;
6062baff59ccSVitaly Kuznetsov 
60635910ccf0SSean Christopherson 	register_syscore_ops(&kvm_syscore_ops);
60645910ccf0SSean Christopherson #endif
60655910ccf0SSean Christopherson 
60665910ccf0SSean Christopherson 	/* A kmem cache lets us meet the alignment requirements of fx_save. */
6067af585b92SGleb Natapov 	if (!vcpu_align)
6068af585b92SGleb Natapov 		vcpu_align = __alignof__(struct kvm_vcpu);
60695910ccf0SSean Christopherson 	kvm_vcpu_cache =
6070af585b92SGleb Natapov 		kmem_cache_create_usercopy("kvm_vcpu", vcpu_size, vcpu_align,
60710fce5623SAvi Kivity 					   SLAB_ACCOUNT,
60720fce5623SAvi Kivity 					   offsetof(struct kvm_vcpu, arch),
60730fce5623SAvi Kivity 					   offsetofend(struct kvm_vcpu, stats_id)
60740fce5623SAvi Kivity 					   - offsetof(struct kvm_vcpu, arch),
60750fce5623SAvi Kivity 					   NULL);
6076929f45e3SGreg Kroah-Hartman 	if (!kvm_vcpu_cache) {
60770ea4ed8eSDarrick J. Wong 		r = -ENOMEM;
60783c3c29fdSPaolo Bonzini 		goto err_vcpu_cache;
60792b012812SSean Christopherson 	}
60802b012812SSean Christopherson 
60812b012812SSean Christopherson 	for_each_possible_cpu(cpu) {
60822b012812SSean Christopherson 		if (!alloc_cpumask_var_node(&per_cpu(cpu_kick_mask, cpu),
60832b012812SSean Christopherson 					    GFP_KERNEL, cpu_to_node(cpu))) {
60842b012812SSean Christopherson 			r = -ENOMEM;
60852b012812SSean Christopherson 			goto err_cpu_kick_mask;
60862b012812SSean Christopherson 		}
60872b012812SSean Christopherson 	}
60882b012812SSean Christopherson 
60892b012812SSean Christopherson 	r = kvm_irqfd_init();
60902b012812SSean Christopherson 	if (r)
60913c3c29fdSPaolo Bonzini 		goto err_irqfd;
60920fce5623SAvi Kivity 
60930fce5623SAvi Kivity 	r = kvm_async_pf_init();
60942b012812SSean Christopherson 	if (r)
60952b012812SSean Christopherson 		goto err_async_pf;
60962b012812SSean Christopherson 
6097af585b92SGleb Natapov 	kvm_chardev_ops.owner = module;
60985910ccf0SSean Christopherson 
60995910ccf0SSean Christopherson 	kvm_preempt_ops.sched_in = kvm_sched_in;
61005910ccf0SSean Christopherson 	kvm_preempt_ops.sched_out = kvm_sched_out;
61019f1a4c00SSean Christopherson 
6102baff59ccSVitaly Kuznetsov 	kvm_init_debug();
6103baff59ccSVitaly Kuznetsov 
61040fce5623SAvi Kivity 	r = kvm_vfio_ops_init();
61059f1a4c00SSean Christopherson 	if (WARN_ON_ONCE(r))
6106441f7bfaSSean Christopherson 		goto err_vfio;
610735774a9fSSean Christopherson 
6108aaf12a7bSChao Gao 	/*
6109441f7bfaSSean Christopherson 	 * Registration _must_ be the very last thing done, as this exposes
61100fce5623SAvi Kivity 	 * /dev/kvm to userspace, i.e. all infrastructure must be set up!
61110fce5623SAvi Kivity 	 */
61120fce5623SAvi Kivity 	r = misc_register(&kvm_dev);
61130fce5623SAvi Kivity 	if (r) {
61140fce5623SAvi Kivity 		pr_err("kvm: misc device register failed\n");
61150fce5623SAvi Kivity 		goto err_register;
6116baff59ccSVitaly Kuznetsov 	}
6117baff59ccSVitaly Kuznetsov 
61182b012812SSean Christopherson 	return 0;
61192b012812SSean Christopherson 
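	/*
	 * Error unwinding: each label below undoes one successfully completed
	 * setup step and then falls through to the next label, so a failure
	 * at any point in kvm_init() tears down exactly what was set up.
	 */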
61202b012812SSean Christopherson err_register:
61212b012812SSean Christopherson 	kvm_vfio_ops_exit();
61222b012812SSean Christopherson err_vfio:
61230fce5623SAvi Kivity 	kvm_async_pf_deinit();
61242b012812SSean Christopherson err_async_pf:
61252b012812SSean Christopherson 	kvm_irqfd_exit();
6126baff59ccSVitaly Kuznetsov err_irqfd:
6127baff59ccSVitaly Kuznetsov err_cpu_kick_mask:
61280fce5623SAvi Kivity 	for_each_possible_cpu(cpu)
612973b8dc04SSean Christopherson 		free_cpumask_var(per_cpu(cpu_kick_mask, cpu));
6130af585b92SGleb Natapov 	kmem_cache_destroy(kvm_vcpu_cache);
6131441f7bfaSSean Christopherson err_vcpu_cache:
6132fb3600ccSRafael J. Wysocki #ifdef CONFIG_KVM_GENERIC_HARDWARE_ENABLING
6133aaf12a7bSChao Gao 	unregister_syscore_ops(&kvm_syscore_ops);
6134441f7bfaSSean Christopherson 	cpuhp_remove_state_nocalls(CPUHP_AP_KVM_ONLINE);
61355910ccf0SSean Christopherson #endif
61360fce5623SAvi Kivity 	return r;
61370fce5623SAvi Kivity }
6138c57c8046SJunaid Shahid EXPORT_SYMBOL_GPL(kvm_init);
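
/*
 * Illustrative caller (a sketch under assumed names, not code from this
 * file): an architecture module's init path typically performs its own
 * hardware/vendor setup first and then calls kvm_init() with the size and
 * alignment of its vcpu container type, e.g.:
 *
 *	static int __init my_arch_kvm_init(void)
 *	{
 *		... arch-specific setup ...
 *		return kvm_init(sizeof(struct my_arch_vcpu),
 *				__alignof__(struct my_arch_vcpu), THIS_MODULE);
 *	}
 *	module_init(my_arch_kvm_init);
 *
 * 'struct my_arch_vcpu' is a hypothetical per-arch structure that typically
 * embeds struct kvm_vcpu as its first member.
 */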
6139c57c8046SJunaid Shahid 
6140c57c8046SJunaid Shahid void kvm_exit(void)
6141c57c8046SJunaid Shahid {
6142c57c8046SJunaid Shahid 	int cpu;
6143c57c8046SJunaid Shahid 
6144c57c8046SJunaid Shahid 	/*
6145c57c8046SJunaid Shahid 	 * Note, unregistering /dev/kvm doesn't strictly need to come first,
6146c57c8046SJunaid Shahid 	 * as fops_get(), a.k.a. try_module_get(), prevents acquiring references
6147c57c8046SJunaid Shahid 	 * to KVM while the module is being stopped.
6148c57c8046SJunaid Shahid 	 */
6149c57c8046SJunaid Shahid 	misc_deregister(&kvm_dev);
6150c57c8046SJunaid Shahid 
6151c57c8046SJunaid Shahid 	debugfs_remove_recursive(kvm_debugfs_dir);
6152c57c8046SJunaid Shahid 	for_each_possible_cpu(cpu)
6153c57c8046SJunaid Shahid 		free_cpumask_var(per_cpu(cpu_kick_mask, cpu));
6154c57c8046SJunaid Shahid 	kmem_cache_destroy(kvm_vcpu_cache);
6155e45cce30SVipin Sharma 	kvm_vfio_ops_exit();
6156c57c8046SJunaid Shahid 	kvm_async_pf_deinit();
6157c57c8046SJunaid Shahid #ifdef CONFIG_KVM_GENERIC_HARDWARE_ENABLING
6158c57c8046SJunaid Shahid 	unregister_syscore_ops(&kvm_syscore_ops);
6159c57c8046SJunaid Shahid 	cpuhp_remove_state_nocalls(CPUHP_AP_KVM_ONLINE);
6160c57c8046SJunaid Shahid #endif
6161c57c8046SJunaid Shahid 	kvm_irqfd_exit();
6162c57c8046SJunaid Shahid }
6163c57c8046SJunaid Shahid EXPORT_SYMBOL_GPL(kvm_exit);
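
/*
 * Illustrative counterpart to the kvm_init() sketch above (hypothetical
 * names): the arch module's exit path calls kvm_exit() and then tears down
 * its own vendor state, e.g.:
 *
 *	static void __exit my_arch_kvm_exit(void)
 *	{
 *		kvm_exit();
 *		... arch-specific teardown ...
 *	}
 *	module_exit(my_arch_kvm_exit);
 */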
6164c57c8046SJunaid Shahid 
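/*
 * Bundle of arguments handed from kvm_vm_create_worker_thread() to the
 * worker kthread.  It lives on the creating thread's stack, which is why the
 * worker copies out what it needs and signals init_done before the creator
 * is allowed to return (see kvm_vm_worker_thread() below).
 */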
6165c57c8046SJunaid Shahid struct kvm_vm_worker_thread_context {
6166c57c8046SJunaid Shahid 	struct kvm *kvm;
6167c57c8046SJunaid Shahid 	struct task_struct *parent;
6168c57c8046SJunaid Shahid 	struct completion init_done;
6169c57c8046SJunaid Shahid 	kvm_vm_thread_fn_t thread_fn;
6170c57c8046SJunaid Shahid 	uintptr_t data;
6171c57c8046SJunaid Shahid 	int err;
6172c57c8046SJunaid Shahid };
6173c57c8046SJunaid Shahid 
6174c57c8046SJunaid Shahid static int kvm_vm_worker_thread(void *context)
6175c57c8046SJunaid Shahid {
6176c57c8046SJunaid Shahid 	/*
6177c57c8046SJunaid Shahid 	 * The init_context is allocated on the stack of the parent thread, so
6178c57c8046SJunaid Shahid 	 * we have to locally copy anything that is needed beyond initialization
6179c57c8046SJunaid Shahid 	 */
6180c57c8046SJunaid Shahid 	struct kvm_vm_worker_thread_context *init_context = context;
6181c57c8046SJunaid Shahid 	struct task_struct *parent;
6182e45cce30SVipin Sharma 	struct kvm *kvm = init_context->kvm;
6183c57c8046SJunaid Shahid 	kvm_vm_thread_fn_t thread_fn = init_context->thread_fn;
6184c57c8046SJunaid Shahid 	uintptr_t data = init_context->data;
6185c57c8046SJunaid Shahid 	int err;
6186c57c8046SJunaid Shahid 
6187c57c8046SJunaid Shahid 	err = kthread_park(current);
6188c57c8046SJunaid Shahid 	/* kthread_park(current) is never supposed to return an error */
6189c57c8046SJunaid Shahid 	WARN_ON(err != 0);
6190e45cce30SVipin Sharma 	if (err)
6191e45cce30SVipin Sharma 		goto init_complete;
6192e45cce30SVipin Sharma 
6193e45cce30SVipin Sharma 	err = cgroup_attach_task_all(init_context->parent, current);
6194e45cce30SVipin Sharma 	if (err) {
6195e45cce30SVipin Sharma 		kvm_err("%s: cgroup_attach_task_all failed with err %d\n",
6196e45cce30SVipin Sharma 			__func__, err);
6197e45cce30SVipin Sharma 		goto init_complete;
6198e45cce30SVipin Sharma 	}
6199e45cce30SVipin Sharma 
6200e45cce30SVipin Sharma 	set_user_nice(current, task_nice(init_context->parent));
6201e45cce30SVipin Sharma 
6202e45cce30SVipin Sharma init_complete:
6203e45cce30SVipin Sharma 	init_context->err = err;
6204e45cce30SVipin Sharma 	complete(&init_context->init_done);
6205e45cce30SVipin Sharma 	init_context = NULL;
6206e45cce30SVipin Sharma 
6207e45cce30SVipin Sharma 	if (err)
6208e45cce30SVipin Sharma 		goto out;
6209c57c8046SJunaid Shahid 
6210c57c8046SJunaid Shahid 	/* Wait to be woken up by the spawner before proceeding. */
6211c57c8046SJunaid Shahid 	kthread_parkme();
6212c57c8046SJunaid Shahid 
6213c57c8046SJunaid Shahid 	if (!kthread_should_stop())
6214c57c8046SJunaid Shahid 		err = thread_fn(kvm, data);
6215c57c8046SJunaid Shahid 
6216c57c8046SJunaid Shahid out:
6217c57c8046SJunaid Shahid 	/*
6218c57c8046SJunaid Shahid 	 * Move kthread back to its original cgroup to prevent it lingering in
6219c57c8046SJunaid Shahid 	 * the cgroup of the VM process, after the latter finishes its
6220c57c8046SJunaid Shahid 	 * execution.
6221c57c8046SJunaid Shahid 	 *
6222c57c8046SJunaid Shahid 	 * kthread_stop() waits on the 'exited' completion condition which is
6223c57c8046SJunaid Shahid 	 * set in exit_mm(), via mm_release(), in do_exit(). However, the
6224c57c8046SJunaid Shahid 	 * kthread is removed from the cgroup in the cgroup_exit() which is
6225c57c8046SJunaid Shahid 	 * called after the exit_mm(). This causes the kthread_stop() to return
6226c57c8046SJunaid Shahid 	 * before the kthread actually quits the cgroup.
6227c57c8046SJunaid Shahid 	 */
6228c57c8046SJunaid Shahid 	rcu_read_lock();
6229c57c8046SJunaid Shahid 	parent = rcu_dereference(current->real_parent);
6230c57c8046SJunaid Shahid 	get_task_struct(parent);
6231c57c8046SJunaid Shahid 	rcu_read_unlock();
6232c57c8046SJunaid Shahid 	cgroup_attach_task_all(parent, current);
6233c57c8046SJunaid Shahid 	put_task_struct(parent);
6234c57c8046SJunaid Shahid 
6235c57c8046SJunaid Shahid 	return err;
6236c57c8046SJunaid Shahid }
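
/*
 * Handshake with the creator, for reference: kvm_vm_create_worker_thread()
 * (below) spawns the kthread and sleeps on init_done; the kthread marks
 * itself to be parked, attaches itself to the creator's cgroups, inherits
 * the creator's nice level, publishes the result in init_context->err,
 * completes init_done, and only then parks in kthread_parkme() until the
 * caller unparks it to run thread_fn().
 */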
6237c57c8046SJunaid Shahid 
6238c57c8046SJunaid Shahid int kvm_vm_create_worker_thread(struct kvm *kvm, kvm_vm_thread_fn_t thread_fn,
6239c57c8046SJunaid Shahid 				uintptr_t data, const char *name,
6240c57c8046SJunaid Shahid 				struct task_struct **thread_ptr)
6241 {
6242 	struct kvm_vm_worker_thread_context init_context = {};
6243 	struct task_struct *thread;
6244 
6245 	*thread_ptr = NULL;
6246 	init_context.kvm = kvm;
6247 	init_context.parent = current;
6248 	init_context.thread_fn = thread_fn;
6249 	init_context.data = data;
6250 	init_completion(&init_context.init_done);
6251 
6252 	thread = kthread_run(kvm_vm_worker_thread, &init_context,
6253 			     "%s-%d", name, task_pid_nr(current));
6254 	if (IS_ERR(thread))
6255 		return PTR_ERR(thread);
6256 
6257 	/* kthread_run is never supposed to return NULL */
6258 	WARN_ON(thread == NULL);
6259 
6260 	wait_for_completion(&init_context.init_done);
6261 
6262 	if (!init_context.err)
6263 		*thread_ptr = thread;
6264 
6265 	return init_context.err;
6266 }
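
/*
 * Illustrative usage (a sketch with hypothetical names): the created kthread
 * starts out parked, so the caller unparks it once it is ready for thread_fn
 * to run, and stops it when the per-VM worker is no longer needed:
 *
 *	struct task_struct *worker;
 *	int r;
 *
 *	r = kvm_vm_create_worker_thread(kvm, my_worker_fn, 0,
 *					"kvm-my-worker", &worker);
 *	if (!r) {
 *		kthread_unpark(worker);
 *		...
 *		kthread_stop(worker);
 *	}
 */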
6267