xref: /openbmc/linux/include/linux/kvm_host.h (revision f2e106692d5189303997ad7b96de8d8123aa5613)
1edf88417SAvi Kivity #ifndef __KVM_HOST_H
2edf88417SAvi Kivity #define __KVM_HOST_H
3edf88417SAvi Kivity 
4edf88417SAvi Kivity /*
5edf88417SAvi Kivity  * This work is licensed under the terms of the GNU GPL, version 2.  See
6edf88417SAvi Kivity  * the COPYING file in the top-level directory.
7edf88417SAvi Kivity  */
8edf88417SAvi Kivity 
9edf88417SAvi Kivity #include <linux/types.h>
10edf88417SAvi Kivity #include <linux/hardirq.h>
11edf88417SAvi Kivity #include <linux/list.h>
12edf88417SAvi Kivity #include <linux/mutex.h>
13edf88417SAvi Kivity #include <linux/spinlock.h>
14edf88417SAvi Kivity #include <linux/signal.h>
15edf88417SAvi Kivity #include <linux/sched.h>
16187f1882SPaul Gortmaker #include <linux/bug.h>
17edf88417SAvi Kivity #include <linux/mm.h>
18b297e672SEric B Munson #include <linux/mmu_notifier.h>
19edf88417SAvi Kivity #include <linux/preempt.h>
200937c48dSSheng Yang #include <linux/msi.h>
21d89f5effSJan Kiszka #include <linux/slab.h>
22bd2b53b2SMichael S. Tsirkin #include <linux/rcupdate.h>
23bd80158aSJan Kiszka #include <linux/ratelimit.h>
2483f09228SXiao Guangrong #include <linux/err.h>
25c11f11fcSFrederic Weisbecker #include <linux/irqflags.h>
26521921baSFrederic Weisbecker #include <linux/context_tracking.h>
27edf88417SAvi Kivity #include <asm/signal.h>
28edf88417SAvi Kivity 
29edf88417SAvi Kivity #include <linux/kvm.h>
30edf88417SAvi Kivity #include <linux/kvm_para.h>
31edf88417SAvi Kivity 
32edf88417SAvi Kivity #include <linux/kvm_types.h>
33edf88417SAvi Kivity 
34edf88417SAvi Kivity #include <asm/kvm_host.h>
35edf88417SAvi Kivity 
36cef4dea0SAvi Kivity #ifndef KVM_MMIO_SIZE
37cef4dea0SAvi Kivity #define KVM_MMIO_SIZE 8
38cef4dea0SAvi Kivity #endif
39cef4dea0SAvi Kivity 
40edf88417SAvi Kivity /*
4167b29204SXiao Guangrong  * The bit 16 ~ bit 31 of kvm_memory_region::flags are internally used
4267b29204SXiao Guangrong  * in kvm, other bits are visible for userspace which are defined in
4367b29204SXiao Guangrong  * include/uapi/linux/kvm.h.
4467b29204SXiao Guangrong  */
4567b29204SXiao Guangrong #define KVM_MEMSLOT_INVALID	(1UL << 16)
4667b29204SXiao Guangrong 
4787da7e66SXiao Guangrong /* Two fragments for cross MMIO pages. */
4887da7e66SXiao Guangrong #define KVM_MAX_MMIO_FRAGMENTS	2
49f78146b0SAvi Kivity 
50f78146b0SAvi Kivity /*
519c5b1172SXiao Guangrong  * For the normal pfn, the highest 12 bits should be zero,
5281c52c56SXiao Guangrong  * so we can mask bit 62 ~ bit 52  to indicate the error pfn,
5381c52c56SXiao Guangrong  * mask bit 63 to indicate the noslot pfn.
549c5b1172SXiao Guangrong  */
5581c52c56SXiao Guangrong #define KVM_PFN_ERR_MASK	(0x7ffULL << 52)
5681c52c56SXiao Guangrong #define KVM_PFN_ERR_NOSLOT_MASK	(0xfffULL << 52)
5781c52c56SXiao Guangrong #define KVM_PFN_NOSLOT		(0x1ULL << 63)
586c8ee57bSXiao Guangrong 
599c5b1172SXiao Guangrong #define KVM_PFN_ERR_FAULT	(KVM_PFN_ERR_MASK)
609c5b1172SXiao Guangrong #define KVM_PFN_ERR_HWPOISON	(KVM_PFN_ERR_MASK + 1)
6181c52c56SXiao Guangrong #define KVM_PFN_ERR_RO_FAULT	(KVM_PFN_ERR_MASK + 2)
629c5b1172SXiao Guangrong 
6381c52c56SXiao Guangrong /*
6481c52c56SXiao Guangrong  * error pfns indicate that the gfn is in slot but failed to
6581c52c56SXiao Guangrong  * translate it to pfn on host.
6681c52c56SXiao Guangrong  */
679c5b1172SXiao Guangrong static inline bool is_error_pfn(pfn_t pfn)
6883f09228SXiao Guangrong {
699c5b1172SXiao Guangrong 	return !!(pfn & KVM_PFN_ERR_MASK);
7083f09228SXiao Guangrong }
7183f09228SXiao Guangrong 
7281c52c56SXiao Guangrong /*
7381c52c56SXiao Guangrong  * error_noslot pfns indicate that the gfn can not be
7481c52c56SXiao Guangrong  * translated to pfn - it is not in slot or failed to
7581c52c56SXiao Guangrong  * translate it to pfn.
7681c52c56SXiao Guangrong  */
7781c52c56SXiao Guangrong static inline bool is_error_noslot_pfn(pfn_t pfn)
7883f09228SXiao Guangrong {
7981c52c56SXiao Guangrong 	return !!(pfn & KVM_PFN_ERR_NOSLOT_MASK);
8083f09228SXiao Guangrong }
8183f09228SXiao Guangrong 
8281c52c56SXiao Guangrong /* noslot pfn indicates that the gfn is not in slot. */
/* Exact match against the single noslot value (bit 63 only). */
static inline bool is_noslot_pfn(pfn_t pfn)
{
	return pfn == KVM_PFN_NOSLOT;
}
8783f09228SXiao Guangrong 
88bf640876SDominik Dingel /*
89bf640876SDominik Dingel  * architectures with KVM_HVA_ERR_BAD other than PAGE_OFFSET (e.g. s390)
90bf640876SDominik Dingel  * provide own defines and kvm_is_error_hva
91bf640876SDominik Dingel  */
92bf640876SDominik Dingel #ifndef KVM_HVA_ERR_BAD
93bf640876SDominik Dingel 
94ca3a490cSXiao Guangrong #define KVM_HVA_ERR_BAD		(PAGE_OFFSET)
957068d097SXiao Guangrong #define KVM_HVA_ERR_RO_BAD	(PAGE_OFFSET + PAGE_SIZE)
96ca3a490cSXiao Guangrong 
/*
 * Both error hvas (KVM_HVA_ERR_BAD and KVM_HVA_ERR_RO_BAD) are placed
 * at or above PAGE_OFFSET, so a single comparison classifies them.
 * NOTE(review): assumes valid userspace hvas are always below
 * PAGE_OFFSET on architectures using this default — confirm for each
 * arch that does not override KVM_HVA_ERR_BAD.
 */
static inline bool kvm_is_error_hva(unsigned long addr)
{
	return addr >= PAGE_OFFSET;
}
101ca3a490cSXiao Guangrong 
102bf640876SDominik Dingel #endif
103bf640876SDominik Dingel 
1046cede2e6SXiao Guangrong #define KVM_ERR_PTR_BAD_PAGE	(ERR_PTR(-ENOENT))
1056cede2e6SXiao Guangrong 
/* Error pages are encoded as ERR_PTR values (see KVM_ERR_PTR_BAD_PAGE). */
static inline bool is_error_page(struct page *page)
{
	return IS_ERR(page);
}
1106cede2e6SXiao Guangrong 
111edf88417SAvi Kivity /*
112edf88417SAvi Kivity  * vcpu->requests bit members
113edf88417SAvi Kivity  */
114edf88417SAvi Kivity #define KVM_REQ_TLB_FLUSH          0
1152f52d58cSAvi Kivity #define KVM_REQ_MIGRATE_TIMER      1
116b209749fSAvi Kivity #define KVM_REQ_REPORT_TPR_ACCESS  2
1172e53d63aSMarcelo Tosatti #define KVM_REQ_MMU_RELOAD         3
11871c4dfafSJoerg Roedel #define KVM_REQ_TRIPLE_FAULT       4
11906e05645SMarcelo Tosatti #define KVM_REQ_PENDING_TIMER      5
120d7690175SMarcelo Tosatti #define KVM_REQ_UNHALT             6
1214731d4c7SMarcelo Tosatti #define KVM_REQ_MMU_SYNC           7
12234c238a1SZachary Amsden #define KVM_REQ_CLOCK_UPDATE       8
12332f88400SMarcelo Tosatti #define KVM_REQ_KICK               9
12402daab21SAvi Kivity #define KVM_REQ_DEACTIVATE_FPU    10
1253842d135SAvi Kivity #define KVM_REQ_EVENT             11
126af585b92SGleb Natapov #define KVM_REQ_APF_HALT          12
127c9aaa895SGlauber Costa #define KVM_REQ_STEAL_UPDATE      13
1287460fb4aSAvi Kivity #define KVM_REQ_NMI               14
129730dca42SJan Kiszka #define KVM_REQ_PMU               15
130730dca42SJan Kiszka #define KVM_REQ_PMI               16
131730dca42SJan Kiszka #define KVM_REQ_WATCHDOG          17
132730dca42SJan Kiszka #define KVM_REQ_MASTERCLOCK_UPDATE 18
133730dca42SJan Kiszka #define KVM_REQ_MCLOCK_INPROGRESS 19
134730dca42SJan Kiszka #define KVM_REQ_EPR_EXIT          20
135730dca42SJan Kiszka #define KVM_REQ_SCAN_IOAPIC       21
1360061d53dSMarcelo Tosatti #define KVM_REQ_GLOBAL_CLOCK_UPDATE 22
137edf88417SAvi Kivity 
1385550af4dSSheng Yang #define KVM_USERSPACE_IRQ_SOURCE_ID		0
1397a84428aSAlex Williamson #define KVM_IRQFD_RESAMPLE_IRQ_SOURCE_ID	1
1405550af4dSSheng Yang 
1416c474694SMichael S. Tsirkin struct kvm;
142edf88417SAvi Kivity struct kvm_vcpu;
143edf88417SAvi Kivity extern struct kmem_cache *kvm_vcpu_cache;
144edf88417SAvi Kivity 
1452f303b74SPaolo Bonzini extern spinlock_t kvm_lock;
146fc1b7492SGeoff Levand extern struct list_head vm_list;
147fc1b7492SGeoff Levand 
/*
 * One device registration on a KVM I/O bus: the address range
 * [addr, addr + len) handled by @dev.
 */
struct kvm_io_range {
	gpa_t addr;
	int len;
	struct kvm_io_device *dev;
};
153743eeb0bSSasha Levin 
154786a9f88SAmos Kong #define NR_IOBUS_DEVS 1000
155a1300716SAmos Kong 
/*
 * A bus of registered I/O device ranges.  range[] is a flexible array
 * with dev_count entries; ioeventfd_count tracks how many of those
 * were registered on behalf of ioeventfds.
 */
struct kvm_io_bus {
	int dev_count;
	int ioeventfd_count;
	struct kvm_io_range range[];
};
161edf88417SAvi Kivity 
/* Identifies which of a VM's I/O buses a device is registered on. */
enum kvm_bus {
	KVM_MMIO_BUS,
	KVM_PIO_BUS,
	KVM_VIRTIO_CCW_NOTIFY_BUS,	/* s390 virtio-ccw notifications */
	KVM_NR_BUSES
};
168e93f8a0fSMarcelo Tosatti 
169e93f8a0fSMarcelo Tosatti int kvm_io_bus_write(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr,
170e93f8a0fSMarcelo Tosatti 		     int len, const void *val);
171126a5af5SCornelia Huck int kvm_io_bus_write_cookie(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr,
172126a5af5SCornelia Huck 			    int len, const void *val, long cookie);
173e93f8a0fSMarcelo Tosatti int kvm_io_bus_read(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr, int len,
174bda9020eSMichael S. Tsirkin 		    void *val);
175126a5af5SCornelia Huck int kvm_io_bus_read_cookie(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr,
176126a5af5SCornelia Huck 			   int len, void *val, long cookie);
177743eeb0bSSasha Levin int kvm_io_bus_register_dev(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr,
178743eeb0bSSasha Levin 			    int len, struct kvm_io_device *dev);
179e93f8a0fSMarcelo Tosatti int kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx,
180edf88417SAvi Kivity 			      struct kvm_io_device *dev);
181edf88417SAvi Kivity 
182af585b92SGleb Natapov #ifdef CONFIG_KVM_ASYNC_PF
/*
 * One outstanding asynchronous page fault, resolved in process
 * context via @work.  @queue links the entry on the vcpu's queue of
 * pending faults; @link is presumably the completion-list linkage —
 * confirm against virt/kvm/async_pf.c.
 */
struct kvm_async_pf {
	struct work_struct work;
	struct list_head link;
	struct list_head queue;
	struct kvm_vcpu *vcpu;		/* vcpu that took the fault */
	struct mm_struct *mm;		/* mm to fault the page in under */
	gva_t gva;			/* faulting guest virtual address */
	unsigned long addr;		/* host virtual address to fault in */
	struct kvm_arch_async_pf arch;
	bool   wakeup_all;		/* broadcast wakeup, not tied to one gva */
};
194af585b92SGleb Natapov 
195af585b92SGleb Natapov void kvm_clear_async_pf_completion_queue(struct kvm_vcpu *vcpu);
196af585b92SGleb Natapov void kvm_check_async_pf_completion(struct kvm_vcpu *vcpu);
197af585b92SGleb Natapov int kvm_setup_async_pf(struct kvm_vcpu *vcpu, gva_t gva, gfn_t gfn,
198af585b92SGleb Natapov 		       struct kvm_arch_async_pf *arch);
199344d9588SGleb Natapov int kvm_async_pf_wakeup_all(struct kvm_vcpu *vcpu);
200af585b92SGleb Natapov #endif
201af585b92SGleb Natapov 
/*
 * States for kvm_vcpu::mode.  NOTE(review): presumably used to decide
 * whether a remote vcpu needs an IPI when kicked (see
 * kvm_vcpu_exiting_guest_mode()) — confirm against the kick path.
 */
enum {
	OUTSIDE_GUEST_MODE,
	IN_GUEST_MODE,
	EXITING_GUEST_MODE,
	READING_SHADOW_PAGE_TABLES,
};
2086b7e2d09SXiao Guangrong 
209f78146b0SAvi Kivity /*
210f78146b0SAvi Kivity  * Sometimes a large or cross-page mmio needs to be broken up into separate
211f78146b0SAvi Kivity  * exits for userspace servicing.
212f78146b0SAvi Kivity  */
/* One piece of a split-up mmio access: guest address, data, length. */
struct kvm_mmio_fragment {
	gpa_t gpa;
	void *data;
	unsigned len;
};
218f78146b0SAvi Kivity 
/*
 * Architecture-independent per-vcpu state; arch-specific state lives
 * in @arch.
 */
struct kvm_vcpu {
	struct kvm *kvm;		/* VM this vcpu belongs to */
#ifdef CONFIG_PREEMPT_NOTIFIERS
	struct preempt_notifier preempt_notifier;
#endif
	int cpu;
	int vcpu_id;
	int srcu_idx;
	int mode;			/* IN_GUEST_MODE etc., see enum above */
	unsigned long requests;		/* bitmask of pending KVM_REQ_* bits */
	unsigned long guest_debug;

	struct mutex mutex;
	struct kvm_run *run;		/* kernel/userspace shared run area */

	int fpu_active;
	int guest_fpu_loaded, guest_xcr0_loaded;
	wait_queue_head_t wq;		/* presumably waited on in kvm_vcpu_block() — confirm */
	struct pid *pid;		/* pid of the task backing this vcpu */
	int sigset_active;
	sigset_t sigset;
	struct kvm_vcpu_stat stat;

#ifdef CONFIG_HAS_IOMEM
	/* In-flight mmio emulation state (see struct kvm_mmio_fragment). */
	int mmio_needed;
	int mmio_read_completed;
	int mmio_is_write;
	int mmio_cur_fragment;
	int mmio_nr_fragments;
	struct kvm_mmio_fragment mmio_fragments[KVM_MAX_MMIO_FRAGMENTS];
#endif

#ifdef CONFIG_KVM_ASYNC_PF
	struct {
		u32 queued;		/* number of faults currently queued */
		struct list_head queue;
		struct list_head done;
		spinlock_t lock;
	} async_pf;
#endif

#ifdef CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT
	/*
	 * Cpu relax intercept or pause loop exit optimization
	 * in_spin_loop: set when a vcpu does a pause loop exit
	 *  or cpu relax intercepted.
	 * dy_eligible: indicates whether vcpu is eligible for directed yield.
	 */
	struct {
		bool in_spin_loop;
		bool dy_eligible;
	} spin_loop;
#endif
	bool preempted;
	struct kvm_vcpu_arch arch;
};
275edf88417SAvi Kivity 
/*
 * Atomically switch vcpu->mode from IN_GUEST_MODE to
 * EXITING_GUEST_MODE and return the mode that was observed (cmpxchg
 * returns the old value), so the caller can tell whether the vcpu was
 * actually running guest code.
 */
static inline int kvm_vcpu_exiting_guest_mode(struct kvm_vcpu *vcpu)
{
	return cmpxchg(&vcpu->mode, IN_GUEST_MODE, EXITING_GUEST_MODE);
}
2806b7e2d09SXiao Guangrong 
281660c22c4STakuya Yoshikawa /*
282660c22c4STakuya Yoshikawa  * Some of the bitops functions do not support too long bitmaps.
283660c22c4STakuya Yoshikawa  * This number must be determined not to exceed such limits.
284660c22c4STakuya Yoshikawa  */
285660c22c4STakuya Yoshikawa #define KVM_MEM_MAX_NR_PAGES ((1UL << 31) - 1)
286660c22c4STakuya Yoshikawa 
/* One guest physical memory slot backed by a userspace address range. */
struct kvm_memory_slot {
	gfn_t base_gfn;			/* first guest frame covered by the slot */
	unsigned long npages;		/* slot size in pages (<= KVM_MEM_MAX_NR_PAGES) */
	unsigned long *dirty_bitmap;	/* one bit per page; see kvm_dirty_bitmap_bytes() */
	struct kvm_arch_memory_slot arch;
	unsigned long userspace_addr;	/* hva of the slot's first page */
	u32 flags;			/* user flags plus internal bits >= bit 16 */
	short id;			/* slot id; see id_to_memslot() */
};
296edf88417SAvi Kivity 
/*
 * Bytes needed for a slot's dirty bitmap: one bit per page, rounded
 * up to a whole number of longs so the bitops helpers can be used
 * (see the KVM_MEM_MAX_NR_PAGES note above).
 */
static inline unsigned long kvm_dirty_bitmap_bytes(struct kvm_memory_slot *memslot)
{
	return ALIGN(memslot->npages, BITS_PER_LONG) / 8;
}
30187bf6e7dSTakuya Yoshikawa 
/*
 * Kernel-internal form of one GSI routing entry.  @set injects the
 * interrupt; the union holds the target: either an irqchip pin or an
 * MSI message, selected by @type.  Entries sharing a gsi are chained
 * through @link (see kvm_irq_routing_table::map).
 */
struct kvm_kernel_irq_routing_entry {
	u32 gsi;
	u32 type;
	int (*set)(struct kvm_kernel_irq_routing_entry *e,
		   struct kvm *kvm, int irq_source_id, int level,
		   bool line_status);
	union {
		struct {
			unsigned irqchip;
			unsigned pin;
		} irqchip;
		struct msi_msg msi;
	};
	struct hlist_node link;
};
31746e624b9SGleb Natapov 
318a725d56aSAlexander Graf #ifdef CONFIG_HAVE_KVM_IRQ_ROUTING
3193e71f88bSGleb Natapov 
/*
 * A VM's GSI routing state.  Readers access it via RCU (see
 * kvm::irq_routing); updates are serialized by irq_lock.
 */
struct kvm_irq_routing_table {
	/* presumably per-chip pin -> gsi mapping — confirm against setup code */
	int chip[KVM_NR_IRQCHIPS][KVM_IRQCHIP_NUM_PINS];
	struct kvm_kernel_irq_routing_entry *rt_entries;
	u32 nr_rt_entries;
	/*
	 * Array indexed by gsi. Each entry contains list of irq chips
	 * the gsi is connected to.
	 */
	struct hlist_head map[0];
};
330399ec807SAvi Kivity 
3313e71f88bSGleb Natapov #else
3323e71f88bSGleb Natapov 
3333e71f88bSGleb Natapov struct kvm_irq_routing_table {};
3343e71f88bSGleb Natapov 
3353e71f88bSGleb Natapov #endif
3363e71f88bSGleb Natapov 
3370743247fSAlex Williamson #ifndef KVM_PRIVATE_MEM_SLOTS
3380743247fSAlex Williamson #define KVM_PRIVATE_MEM_SLOTS 0
3390743247fSAlex Williamson #endif
3400743247fSAlex Williamson 
34193a5cef0SXiao Guangrong #ifndef KVM_MEM_SLOTS_NUM
342bbacc0c1SAlex Williamson #define KVM_MEM_SLOTS_NUM (KVM_USER_MEM_SLOTS + KVM_PRIVATE_MEM_SLOTS)
34393a5cef0SXiao Guangrong #endif
34493a5cef0SXiao Guangrong 
345bf3e05bcSXiao Guangrong /*
346bf3e05bcSXiao Guangrong  * Note:
347bf3e05bcSXiao Guangrong  * memslots are not sorted by id anymore, please use id_to_memslot()
348bf3e05bcSXiao Guangrong  * to get the memslot by its id.
349bf3e05bcSXiao Guangrong  */
struct kvm_memslots {
	/* presumably bumped on each slot change — see update_memslots() */
	u64 generation;
	struct kvm_memory_slot memslots[KVM_MEM_SLOTS_NUM];
	/* The mapping table from slot id to the index in memslots[]. */
	short id_to_index[KVM_MEM_SLOTS_NUM];
};
35646a26bf5SMarcelo Tosatti 
/*
 * Architecture-independent per-VM state; arch-specific state lives in
 * @arch.
 */
struct kvm {
	spinlock_t mmu_lock;
	struct mutex slots_lock;	/* held for memslot updates; see kvm_memslots() */
	struct mm_struct *mm; /* userspace tied to this vm */
	struct kvm_memslots *memslots;	/* SRCU-protected; access via kvm_memslots() */
	struct srcu_struct srcu;
#ifdef CONFIG_KVM_APIC_ARCHITECTURE
	u32 bsp_vcpu_id;
#endif
	struct kvm_vcpu *vcpus[KVM_MAX_VCPUS];
	/* count of valid vcpus[] entries; bounds kvm_for_each_vcpu() */
	atomic_t online_vcpus;
	int last_boosted_vcpu;
	struct list_head vm_list;	/* link on the global vm_list */
	struct mutex lock;
	struct kvm_io_bus *buses[KVM_NR_BUSES];
#ifdef CONFIG_HAVE_KVM_EVENTFD
	struct {
		spinlock_t        lock;
		struct list_head  items;
		struct list_head  resampler_list;
		struct mutex      resampler_lock;
	} irqfds;
	struct list_head ioeventfds;
#endif
	struct kvm_vm_stat stat;
	struct kvm_arch arch;
	atomic_t users_count;		/* see kvm_get_kvm()/kvm_put_kvm() */
#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
	struct kvm_coalesced_mmio_ring *coalesced_mmio_ring;
	spinlock_t ring_lock;
	struct list_head coalesced_zones;
#endif

	struct mutex irq_lock;
#ifdef CONFIG_HAVE_KVM_IRQCHIP
	/*
	 * Update side is protected by irq_lock and,
	 * if configured, irqfds.lock.
	 */
	struct kvm_irq_routing_table __rcu *irq_routing;
	struct hlist_head mask_notifier_list;
	struct hlist_head irq_ack_notifier_list;
#endif

#if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER)
	struct mmu_notifier mmu_notifier;
	unsigned long mmu_notifier_seq;
	long mmu_notifier_count;
#endif
	long tlbs_dirty;
	struct list_head devices;
};
409edf88417SAvi Kivity 
410a737f256SChristoffer Dall #define kvm_err(fmt, ...) \
411a737f256SChristoffer Dall 	pr_err("kvm [%i]: " fmt, task_pid_nr(current), ## __VA_ARGS__)
412a737f256SChristoffer Dall #define kvm_info(fmt, ...) \
413a737f256SChristoffer Dall 	pr_info("kvm [%i]: " fmt, task_pid_nr(current), ## __VA_ARGS__)
414a737f256SChristoffer Dall #define kvm_debug(fmt, ...) \
415a737f256SChristoffer Dall 	pr_debug("kvm [%i]: " fmt, task_pid_nr(current), ## __VA_ARGS__)
416a737f256SChristoffer Dall #define kvm_pr_unimpl(fmt, ...) \
417a737f256SChristoffer Dall 	pr_err_ratelimited("kvm [%i]: " fmt, \
418a737f256SChristoffer Dall 			   task_tgid_nr(current), ## __VA_ARGS__)
419edf88417SAvi Kivity 
420a737f256SChristoffer Dall /* The guest did something we don't support. */
421a737f256SChristoffer Dall #define vcpu_unimpl(vcpu, fmt, ...)					\
422a737f256SChristoffer Dall 	kvm_pr_unimpl("vcpu%i " fmt, (vcpu)->vcpu_id, ## __VA_ARGS__)
423edf88417SAvi Kivity 
/*
 * Return vcpu @i of @kvm.  Callers must bound @i by online_vcpus (as
 * kvm_for_each_vcpu() does).  NOTE(review): the smp_rmb() presumably
 * pairs with a write barrier in the vcpu creation path so that the
 * vcpus[i] pointer is seen fully initialized — confirm against
 * kvm_vm_ioctl_create_vcpu().
 */
static inline struct kvm_vcpu *kvm_get_vcpu(struct kvm *kvm, int i)
{
	smp_rmb();
	return kvm->vcpus[i];
}
429988a2caeSGleb Natapov 
430988a2caeSGleb Natapov #define kvm_for_each_vcpu(idx, vcpup, kvm) \
431b42fc3cbSJeff Mahoney 	for (idx = 0; \
432b42fc3cbSJeff Mahoney 	     idx < atomic_read(&kvm->online_vcpus) && \
433b42fc3cbSJeff Mahoney 	     (vcpup = kvm_get_vcpu(kvm, idx)) != NULL; \
434b42fc3cbSJeff Mahoney 	     idx++)
435988a2caeSGleb Natapov 
436be6ba0f0SXiao Guangrong #define kvm_for_each_memslot(memslot, slots)	\
437be6ba0f0SXiao Guangrong 	for (memslot = &slots->memslots[0];	\
438bf3e05bcSXiao Guangrong 	      memslot < slots->memslots + KVM_MEM_SLOTS_NUM && memslot->npages;\
439bf3e05bcSXiao Guangrong 		memslot++)
440be6ba0f0SXiao Guangrong 
441edf88417SAvi Kivity int kvm_vcpu_init(struct kvm_vcpu *vcpu, struct kvm *kvm, unsigned id);
442edf88417SAvi Kivity void kvm_vcpu_uninit(struct kvm_vcpu *vcpu);
443edf88417SAvi Kivity 
4449fc77441SMichael S. Tsirkin int __must_check vcpu_load(struct kvm_vcpu *vcpu);
445edf88417SAvi Kivity void vcpu_put(struct kvm_vcpu *vcpu);
446edf88417SAvi Kivity 
447a725d56aSAlexander Graf #ifdef CONFIG_HAVE_KVM_IRQ_ROUTING
448a0f155e9SCornelia Huck int kvm_irqfd_init(void);
449a0f155e9SCornelia Huck void kvm_irqfd_exit(void);
450a0f155e9SCornelia Huck #else
/* No irqfd support configured: initialization trivially succeeds. */
static inline int kvm_irqfd_init(void)
{
	return 0;
}

/* No irqfd support configured: nothing to tear down. */
static inline void kvm_irqfd_exit(void)
{
}
459a0f155e9SCornelia Huck #endif
4600ee75beaSAvi Kivity int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
461edf88417SAvi Kivity 		  struct module *module);
462edf88417SAvi Kivity void kvm_exit(void);
463edf88417SAvi Kivity 
464d39f13b0SIzik Eidus void kvm_get_kvm(struct kvm *kvm);
465d39f13b0SIzik Eidus void kvm_put_kvm(struct kvm *kvm);
466116c14c0SAlex Williamson void update_memslots(struct kvm_memslots *slots, struct kvm_memory_slot *new,
467116c14c0SAlex Williamson 		     u64 last_generation);
468d39f13b0SIzik Eidus 
/*
 * Dereference kvm->memslots.  The caller must hold either the kvm
 * SRCU read lock or slots_lock; the lockdep expression below checks
 * exactly that.
 */
static inline struct kvm_memslots *kvm_memslots(struct kvm *kvm)
{
	return rcu_dereference_check(kvm->memslots,
			srcu_read_lock_held(&kvm->srcu)
			|| lockdep_is_held(&kvm->slots_lock));
}
47590d83dc3SLai Jiangshan 
47628a37544SXiao Guangrong static inline struct kvm_memory_slot *
47728a37544SXiao Guangrong id_to_memslot(struct kvm_memslots *slots, int id)
47828a37544SXiao Guangrong {
479f85e2cb5SXiao Guangrong 	int index = slots->id_to_index[id];
480f85e2cb5SXiao Guangrong 	struct kvm_memory_slot *slot;
481bf3e05bcSXiao Guangrong 
482f85e2cb5SXiao Guangrong 	slot = &slots->memslots[index];
483bf3e05bcSXiao Guangrong 
484f85e2cb5SXiao Guangrong 	WARN_ON(slot->id != id);
485f85e2cb5SXiao Guangrong 	return slot;
48628a37544SXiao Guangrong }
48728a37544SXiao Guangrong 
48874d0727cSTakuya Yoshikawa /*
48974d0727cSTakuya Yoshikawa  * KVM_SET_USER_MEMORY_REGION ioctl allows the following operations:
49074d0727cSTakuya Yoshikawa  * - create a new memory slot
49174d0727cSTakuya Yoshikawa  * - delete an existing memory slot
49274d0727cSTakuya Yoshikawa  * - modify an existing memory slot
49374d0727cSTakuya Yoshikawa  *   -- move it in the guest physical memory space
49474d0727cSTakuya Yoshikawa  *   -- just change its flags
49574d0727cSTakuya Yoshikawa  *
49674d0727cSTakuya Yoshikawa  * Since flags can be changed by some of these operations, the following
49774d0727cSTakuya Yoshikawa  * differentiation is the best we can do for __kvm_set_memory_region():
49874d0727cSTakuya Yoshikawa  */
enum kvm_mr_change {
	KVM_MR_CREATE,		/* install a new slot */
	KVM_MR_DELETE,		/* remove an existing slot */
	KVM_MR_MOVE,		/* relocate a slot in guest physical space */
	KVM_MR_FLAGS_ONLY,	/* only the slot's flags change */
};
50574d0727cSTakuya Yoshikawa 
506edf88417SAvi Kivity int kvm_set_memory_region(struct kvm *kvm,
50747ae31e2STakuya Yoshikawa 			  struct kvm_userspace_memory_region *mem);
508edf88417SAvi Kivity int __kvm_set_memory_region(struct kvm *kvm,
50947ae31e2STakuya Yoshikawa 			    struct kvm_userspace_memory_region *mem);
510db3fe4ebSTakuya Yoshikawa void kvm_arch_free_memslot(struct kvm_memory_slot *free,
511db3fe4ebSTakuya Yoshikawa 			   struct kvm_memory_slot *dont);
512db3fe4ebSTakuya Yoshikawa int kvm_arch_create_memslot(struct kvm_memory_slot *slot, unsigned long npages);
513e59dbe09STakuya Yoshikawa void kvm_arch_memslots_updated(struct kvm *kvm);
514f7784b8eSMarcelo Tosatti int kvm_arch_prepare_memory_region(struct kvm *kvm,
515f7784b8eSMarcelo Tosatti 				struct kvm_memory_slot *memslot,
516f7784b8eSMarcelo Tosatti 				struct kvm_userspace_memory_region *mem,
5177b6195a9STakuya Yoshikawa 				enum kvm_mr_change change);
518f7784b8eSMarcelo Tosatti void kvm_arch_commit_memory_region(struct kvm *kvm,
519edf88417SAvi Kivity 				struct kvm_userspace_memory_region *mem,
5208482644aSTakuya Yoshikawa 				const struct kvm_memory_slot *old,
5218482644aSTakuya Yoshikawa 				enum kvm_mr_change change);
522db3fe4ebSTakuya Yoshikawa bool kvm_largepages_enabled(void);
52354dee993SMarcelo Tosatti void kvm_disable_largepages(void);
5242df72e9bSMarcelo Tosatti /* flush all memory translations */
5252df72e9bSMarcelo Tosatti void kvm_arch_flush_shadow_all(struct kvm *kvm);
5262df72e9bSMarcelo Tosatti /* flush memory translations pointing to 'slot' */
5272df72e9bSMarcelo Tosatti void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
5282df72e9bSMarcelo Tosatti 				   struct kvm_memory_slot *slot);
529a983fb23SMarcelo Tosatti 
53048987781SXiao Guangrong int gfn_to_page_many_atomic(struct kvm *kvm, gfn_t gfn, struct page **pages,
53148987781SXiao Guangrong 			    int nr_pages);
53248987781SXiao Guangrong 
533edf88417SAvi Kivity struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn);
53405da4558SMarcelo Tosatti unsigned long gfn_to_hva(struct kvm *kvm, gfn_t gfn);
535ba6a3541SPaolo Bonzini unsigned long gfn_to_hva_prot(struct kvm *kvm, gfn_t gfn, bool *writable);
5364d8b81abSXiao Guangrong unsigned long gfn_to_hva_memslot(struct kvm_memory_slot *slot, gfn_t gfn);
537edf88417SAvi Kivity void kvm_release_page_clean(struct page *page);
538edf88417SAvi Kivity void kvm_release_page_dirty(struct page *page);
53935149e21SAnthony Liguori void kvm_set_page_dirty(struct page *page);
54035149e21SAnthony Liguori void kvm_set_page_accessed(struct page *page);
54135149e21SAnthony Liguori 
542365fb3fdSXiao Guangrong pfn_t gfn_to_pfn_atomic(struct kvm *kvm, gfn_t gfn);
543612819c3SMarcelo Tosatti pfn_t gfn_to_pfn_async(struct kvm *kvm, gfn_t gfn, bool *async,
544612819c3SMarcelo Tosatti 		       bool write_fault, bool *writable);
54535149e21SAnthony Liguori pfn_t gfn_to_pfn(struct kvm *kvm, gfn_t gfn);
546612819c3SMarcelo Tosatti pfn_t gfn_to_pfn_prot(struct kvm *kvm, gfn_t gfn, bool write_fault,
547612819c3SMarcelo Tosatti 		      bool *writable);
548d5661048SXiao Guangrong pfn_t gfn_to_pfn_memslot(struct kvm_memory_slot *slot, gfn_t gfn);
549037d92dcSXiao Guangrong pfn_t gfn_to_pfn_memslot_atomic(struct kvm_memory_slot *slot, gfn_t gfn);
550037d92dcSXiao Guangrong 
55132cad84fSXiao Guangrong void kvm_release_pfn_dirty(pfn_t pfn);
55235149e21SAnthony Liguori void kvm_release_pfn_clean(pfn_t pfn);
55335149e21SAnthony Liguori void kvm_set_pfn_dirty(pfn_t pfn);
55435149e21SAnthony Liguori void kvm_set_pfn_accessed(pfn_t pfn);
55535149e21SAnthony Liguori void kvm_get_pfn(pfn_t pfn);
55635149e21SAnthony Liguori 
557edf88417SAvi Kivity int kvm_read_guest_page(struct kvm *kvm, gfn_t gfn, void *data, int offset,
558edf88417SAvi Kivity 			int len);
5597ec54588SMarcelo Tosatti int kvm_read_guest_atomic(struct kvm *kvm, gpa_t gpa, void *data,
5607ec54588SMarcelo Tosatti 			  unsigned long len);
561edf88417SAvi Kivity int kvm_read_guest(struct kvm *kvm, gpa_t gpa, void *data, unsigned long len);
562e03b644fSGleb Natapov int kvm_read_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
563e03b644fSGleb Natapov 			   void *data, unsigned long len);
564edf88417SAvi Kivity int kvm_write_guest_page(struct kvm *kvm, gfn_t gfn, const void *data,
565edf88417SAvi Kivity 			 int offset, int len);
566edf88417SAvi Kivity int kvm_write_guest(struct kvm *kvm, gpa_t gpa, const void *data,
567edf88417SAvi Kivity 		    unsigned long len);
56849c7754cSGleb Natapov int kvm_write_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
56949c7754cSGleb Natapov 			   void *data, unsigned long len);
57049c7754cSGleb Natapov int kvm_gfn_to_hva_cache_init(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
5718f964525SAndrew Honig 			      gpa_t gpa, unsigned long len);
572edf88417SAvi Kivity int kvm_clear_guest_page(struct kvm *kvm, gfn_t gfn, int offset, int len);
573edf88417SAvi Kivity int kvm_clear_guest(struct kvm *kvm, gpa_t gpa, unsigned long len);
574edf88417SAvi Kivity struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn);
575edf88417SAvi Kivity int kvm_is_visible_gfn(struct kvm *kvm, gfn_t gfn);
5768f0b1ab6SJoerg Roedel unsigned long kvm_host_page_size(struct kvm *kvm, gfn_t gfn);
577edf88417SAvi Kivity void mark_page_dirty(struct kvm *kvm, gfn_t gfn);
57849c7754cSGleb Natapov void mark_page_dirty_in_slot(struct kvm *kvm, struct kvm_memory_slot *memslot,
57949c7754cSGleb Natapov 			     gfn_t gfn);
580edf88417SAvi Kivity 
581edf88417SAvi Kivity void kvm_vcpu_block(struct kvm_vcpu *vcpu);
582b6d33834SChristoffer Dall void kvm_vcpu_kick(struct kvm_vcpu *vcpu);
58341628d33SKonstantin Weitz bool kvm_vcpu_yield_to(struct kvm_vcpu *target);
584d255f4f2SZhai, Edwin void kvm_vcpu_on_spin(struct kvm_vcpu *vcpu);
585edf88417SAvi Kivity void kvm_resched(struct kvm_vcpu *vcpu);
586edf88417SAvi Kivity void kvm_load_guest_fpu(struct kvm_vcpu *vcpu);
587edf88417SAvi Kivity void kvm_put_guest_fpu(struct kvm_vcpu *vcpu);
588a4ee1ca4SXiao Guangrong 
589edf88417SAvi Kivity void kvm_flush_remote_tlbs(struct kvm *kvm);
5902e53d63aSMarcelo Tosatti void kvm_reload_remote_mmus(struct kvm *kvm);
591d828199eSMarcelo Tosatti void kvm_make_mclock_inprogress_request(struct kvm *kvm);
5923d81bc7eSYang Zhang void kvm_make_scan_ioapic_request(struct kvm *kvm);
593edf88417SAvi Kivity 
/* ioctl dispatch: arch-specific handlers for the /dev/kvm, VM and vCPU fds. */
long kvm_arch_dev_ioctl(struct file *filp,
			unsigned int ioctl, unsigned long arg);
long kvm_arch_vcpu_ioctl(struct file *filp,
			 unsigned int ioctl, unsigned long arg);
int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf);

int kvm_dev_ioctl_check_extension(long ext);

/* Dirty logging: fetch and reset the per-memslot dirty state. */
int kvm_get_dirty_log(struct kvm *kvm,
			struct kvm_dirty_log *log, int *is_dirty);
int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
				struct kvm_dirty_log *log);

int kvm_vm_ioctl_set_memory_region(struct kvm *kvm,
				   struct kvm_userspace_memory_region *mem);
int kvm_vm_ioctl_irq_line(struct kvm *kvm, struct kvm_irq_level *irq_level,
			bool line_status);
long kvm_arch_vm_ioctl(struct file *filp,
		       unsigned int ioctl, unsigned long arg);
613edf88417SAvi Kivity 
/* Arch implementations of the vCPU state-access ioctls. */
int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu);
int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu);

int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
				    struct kvm_translation *tr);

int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs);
int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs);
int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs);
int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs);
int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state);
int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state);
int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
					struct kvm_guest_debug *dbg);
int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run);

/* Module-level arch init/teardown. */
int kvm_arch_init(void *opaque);
void kvm_arch_exit(void);

/* vCPU lifecycle hooks invoked by generic code. */
int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu);

void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu);
void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu);
struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id);
int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu);
int kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu);

/* Per-CPU virtualization hardware enable/disable and compatibility checks. */
int kvm_arch_hardware_enable(void *garbage);
void kvm_arch_hardware_disable(void *garbage);
int kvm_arch_hardware_setup(void);
void kvm_arch_hardware_unsetup(void);
void kvm_arch_check_processor_compat(void *rtn);
int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu);
int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu);

void kvm_free_physmem(struct kvm *kvm);

/* Zeroing allocator pair; presumably kmalloc with vmalloc fallback for
 * large sizes — see the definitions in virt/kvm/kvm_main.c. */
void *kvm_kvzalloc(unsigned long size);
void kvm_kvfree(const void *addr);
660c1a7b32aSTakuya Yoshikawa 
#ifndef __KVM_HAVE_ARCH_VM_ALLOC
/*
 * Default allocation/free of struct kvm.  An architecture may define
 * __KVM_HAVE_ARCH_VM_ALLOC and supply its own pair instead.
 */
static inline struct kvm *kvm_arch_alloc_vm(void)
{
	return kzalloc(sizeof(struct kvm), GFP_KERNEL);
}

static inline void kvm_arch_free_vm(struct kvm *kvm)
{
	kfree(kvm);
}
#endif
672d89f5effSJan Kiszka 
/*
 * Wait queue the vCPU blocks on.  Defaults to the embedded vcpu->wq;
 * architectures that define __KVM_HAVE_ARCH_WQP supply their own
 * pointer via vcpu->arch.wqp instead.
 */
static inline wait_queue_head_t *kvm_arch_vcpu_wq(struct kvm_vcpu *vcpu)
{
#ifdef __KVM_HAVE_ARCH_WQP
	return vcpu->arch.wqp;
#else
	return &vcpu->wq;
#endif
}
681b6d33834SChristoffer Dall 
/* Arch hooks for VM creation/destruction. */
int kvm_arch_init_vm(struct kvm *kvm, unsigned long type);
void kvm_arch_destroy_vm(struct kvm *kvm);
void kvm_arch_sync_events(struct kvm *kvm);

int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu);
/* NOTE(review): redundant — kvm_vcpu_kick() is already declared above. */
void kvm_vcpu_kick(struct kvm_vcpu *vcpu);

bool kvm_is_mmio_pfn(pfn_t pfn);
690c77fb9dcSXiantao Zhang 
/* Callback invoked when the guest acknowledges an interrupt on @gsi. */
struct kvm_irq_ack_notifier {
	struct hlist_node link;		/* chained from the VM's notifier list */
	unsigned gsi;
	void (*irq_acked)(struct kvm_irq_ack_notifier *kian);
};
69662c476c7SBen-Ami Yassour 
/* Kernel-side state for a physical PCI device assigned to a guest. */
struct kvm_assigned_dev_kernel {
	struct kvm_irq_ack_notifier ack_notifier;
	struct list_head list;
	int assigned_dev_id;
	/* Host PCI location: segment / bus / device+function. */
	int host_segnr;
	int host_busnr;
	int host_devfn;
	unsigned int entries_nr;	/* count for the MSI-X entry arrays below */
	int host_irq;
	bool host_irq_disabled;
	bool pci_2_3;		/* presumably: PCI 2.3 INTx masking usable — verify */
	struct msix_entry *host_msix_entries;
	int guest_irq;
	struct msix_entry *guest_msix_entries;
	unsigned long irq_requested_type;
	int irq_source_id;	/* from kvm_request_irq_source_id() */
	int flags;
	struct pci_dev *dev;
	struct kvm *kvm;
	spinlock_t intx_lock;
	spinlock_t intx_mask_lock;
	char irq_name[32];
	struct pci_saved_state *pci_saved_state;
};
72175858a84SAvi Kivity 
/* Callback invoked when the mask state of @irq changes. */
struct kvm_irq_mask_notifier {
	void (*func)(struct kvm_irq_mask_notifier *kimn, bool masked);
	int irq;
	struct hlist_node link;
};

void kvm_register_irq_mask_notifier(struct kvm *kvm, int irq,
				    struct kvm_irq_mask_notifier *kimn);
void kvm_unregister_irq_mask_notifier(struct kvm *kvm, int irq,
				      struct kvm_irq_mask_notifier *kimn);
void kvm_fire_mask_notifiers(struct kvm *kvm, unsigned irqchip, unsigned pin,
			     bool mask);

/* Interrupt injection and ACK notification for the in-kernel irqchip. */
int kvm_set_irq(struct kvm *kvm, int irq_source_id, u32 irq, int level,
		bool line_status);
int kvm_set_irq_inatomic(struct kvm *kvm, int irq_source_id, u32 irq, int level);
int kvm_set_msi(struct kvm_kernel_irq_routing_entry *irq_entry, struct kvm *kvm,
		int irq_source_id, int level, bool line_status);
bool kvm_irq_has_notifier(struct kvm *kvm, unsigned irqchip, unsigned pin);
void kvm_notify_acked_irq(struct kvm *kvm, unsigned irqchip, unsigned pin);
void kvm_register_irq_ack_notifier(struct kvm *kvm,
				   struct kvm_irq_ack_notifier *kian);
void kvm_unregister_irq_ack_notifier(struct kvm *kvm,
				   struct kvm_irq_ack_notifier *kian);
int kvm_request_irq_source_id(struct kvm *kvm);
void kvm_free_irq_source_id(struct kvm *kvm, int irq_source_id);
74862c476c7SBen-Ami Yassour 
/* For vcpu->arch.iommu_flags */
#define KVM_IOMMU_CACHE_COHERENCY	0x1

#ifdef CONFIG_KVM_DEVICE_ASSIGNMENT
/* IOMMU (un)mapping of guest memory for assigned devices. */
int kvm_iommu_map_pages(struct kvm *kvm, struct kvm_memory_slot *slot);
void kvm_iommu_unmap_pages(struct kvm *kvm, struct kvm_memory_slot *slot);
int kvm_iommu_map_guest(struct kvm *kvm);
int kvm_iommu_unmap_guest(struct kvm *kvm);
int kvm_assign_device(struct kvm *kvm,
		      struct kvm_assigned_dev_kernel *assigned_dev);
int kvm_deassign_device(struct kvm *kvm,
			struct kvm_assigned_dev_kernel *assigned_dev);
#else
/*
 * Success no-op stubs when device assignment is compiled out.  Only a
 * subset of the functions above gets a stub here — presumably the rest
 * are called solely from code that is also compiled out; verify before
 * adding new callers.
 */
static inline int kvm_iommu_map_pages(struct kvm *kvm,
				      struct kvm_memory_slot *slot)
{
	return 0;
}

static inline void kvm_iommu_unmap_pages(struct kvm *kvm,
					 struct kvm_memory_slot *slot)
{
}

static inline int kvm_iommu_unmap_guest(struct kvm *kvm)
{
	return 0;
}
#endif
77862c476c7SBen-Ami Yassour 
/*
 * Enter guest context for context-tracking/vtime accounting.  Must be
 * called with preemption disabled (enforced by the BUG_ON).  Interrupts
 * are disabled locally around guest_enter(); presumably it is not safe
 * to run with irqs enabled — mirrors kvm_guest_exit() below.
 */
static inline void kvm_guest_enter(void)
{
	unsigned long flags;

	BUG_ON(preemptible());

	local_irq_save(flags);
	guest_enter();
	local_irq_restore(flags);

	/* KVM does not hold any references to rcu protected data when it
	 * switches CPU into a guest mode. In fact switching to a guest mode
	 * is very similar to exiting to userspase from rcu point of view. In
	 * addition CPU may stay in a guest mode for quite a long time (up to
	 * one time slice). Lets treat guest mode as quiescent state, just like
	 * we do with user-mode execution.
	 */
	rcu_virt_note_context_switch(smp_processor_id());
}
798edf88417SAvi Kivity 
/* Leave guest context; accounting counterpart of kvm_guest_enter(). */
static inline void kvm_guest_exit(void)
{
	unsigned long flags;

	local_irq_save(flags);
	guest_exit();
	local_irq_restore(flags);
}
807edf88417SAvi Kivity 
/*
 * search_memslots() and __gfn_to_memslot() are here because they are
 * used in non-modular code in arch/powerpc/kvm/book3s_hv_rm_mmu.c.
 * gfn_to_memslot() itself isn't here as an inline because that would
 * bloat other code too much.
 */

/*
 * Linear scan for the memslot whose [base_gfn, base_gfn + npages) range
 * contains @gfn; returns NULL when no slot covers it.
 */
static inline struct kvm_memory_slot *
search_memslots(struct kvm_memslots *slots, gfn_t gfn)
{
	struct kvm_memory_slot *memslot;

	kvm_for_each_memslot(memslot, slots)
		if (gfn >= memslot->base_gfn &&
		      gfn < memslot->base_gfn + memslot->npages)
			return memslot;

	return NULL;
}

/* Lock-free variant used when the caller already holds @slots. */
static inline struct kvm_memory_slot *
__gfn_to_memslot(struct kvm_memslots *slots, gfn_t gfn)
{
	return search_memslots(slots, gfn);
}
8329d4cba7fSPaul Mackerras 
83366a03505SGavin Shan static inline unsigned long
83466a03505SGavin Shan __gfn_to_hva_memslot(struct kvm_memory_slot *slot, gfn_t gfn)
83566a03505SGavin Shan {
83666a03505SGavin Shan 	return slot->userspace_addr + (gfn - slot->base_gfn) * PAGE_SIZE;
83766a03505SGavin Shan }
83866a03505SGavin Shan 
/*
 * Slot id for @gfn.  NOTE(review): dereferences the gfn_to_memslot()
 * result without a NULL check — callers must pass only gfns that are
 * backed by a memslot.
 */
static inline int memslot_id(struct kvm *kvm, gfn_t gfn)
{
	return gfn_to_memslot(kvm, gfn)->id;
}

/* Inverse of __gfn_to_hva_memslot(): host virtual address back to a gfn. */
static inline gfn_t
hva_to_gfn_memslot(unsigned long hva, struct kvm_memory_slot *slot)
{
	gfn_t gfn_offset = (hva - slot->userspace_addr) >> PAGE_SHIFT;

	return slot->base_gfn + gfn_offset;
}
851887c08acSXiao Guangrong 
/* Frame-number <-> address conversions: shift by PAGE_SHIFT either way. */
static inline gpa_t gfn_to_gpa(gfn_t gfn)
{
	return (gpa_t)gfn << PAGE_SHIFT;
}

static inline gfn_t gpa_to_gfn(gpa_t gpa)
{
	return (gfn_t)(gpa >> PAGE_SHIFT);
}

static inline hpa_t pfn_to_hpa(pfn_t pfn)
{
	return (hpa_t)pfn << PAGE_SHIFT;
}
86662c476c7SBen-Ami Yassour 
/* Raise KVM_REQ_MIGRATE_TIMER on @vcpu; the request is handled by arch code. */
static inline void kvm_migrate_timers(struct kvm_vcpu *vcpu)
{
	set_bit(KVM_REQ_MIGRATE_TIMER, &vcpu->requests);
}
8712f52d58cSAvi Kivity 
/* Whether a debugfs stat counter lives in struct kvm or struct kvm_vcpu. */
enum kvm_stat_kind {
	KVM_STAT_VM,
	KVM_STAT_VCPU,
};

/* One entry of the debugfs statistics table (debugfs_entries[]). */
struct kvm_stats_debugfs_item {
	const char *name;
	int offset;	/* presumably offsetof() the counter in the kind's struct — verify */
	enum kvm_stat_kind kind;
	struct dentry *dentry;
};
extern struct kvm_stats_debugfs_item debugfs_entries[];
extern struct dentry *kvm_debugfs_dir;
885d4c9ff2dSFeng(Eric) Liu 
#if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER)
/*
 * Returns 1 when the caller must retry: either an mmu-notifier
 * invalidation is in flight (mmu_notifier_count != 0), or one completed
 * since @mmu_seq was sampled (mmu_notifier_seq changed).  Returns 0
 * when it is safe to proceed.
 */
static inline int mmu_notifier_retry(struct kvm *kvm, unsigned long mmu_seq)
{
	if (unlikely(kvm->mmu_notifier_count))
		return 1;
	/*
	 * Ensure the read of mmu_notifier_count happens before the read
	 * of mmu_notifier_seq.  This interacts with the smp_wmb() in
	 * mmu_notifier_invalidate_range_end to make sure that the caller
	 * either sees the old (non-zero) value of mmu_notifier_count or
	 * the new (incremented) value of mmu_notifier_seq.
	 * PowerPC Book3s HV KVM calls this under a per-page lock
	 * rather than under kvm->mmu_lock, for scalability, so
	 * can't rely on kvm->mmu_lock to keep things ordered.
	 */
	smp_rmb();
	if (kvm->mmu_notifier_seq != mmu_seq)
		return 1;
	return 0;
}
#endif
907e930bffeSAndrea Arcangeli 
#ifdef CONFIG_HAVE_KVM_IRQ_ROUTING

#define KVM_MAX_IRQ_ROUTES 1024

int kvm_setup_default_irq_routing(struct kvm *kvm);
int kvm_set_irq_routing(struct kvm *kvm,
			const struct kvm_irq_routing_entry *entries,
			unsigned nr,
			unsigned flags);
/* Translate one userspace routing entry @ue into kernel form @e. */
int kvm_set_routing_entry(struct kvm_irq_routing_table *rt,
			  struct kvm_kernel_irq_routing_entry *e,
			  const struct kvm_irq_routing_entry *ue);
void kvm_free_irq_routing(struct kvm *kvm);

int kvm_send_userspace_msi(struct kvm *kvm, struct kvm_msi *msi);

#else

/* No routing table exists when IRQ routing is compiled out. */
static inline void kvm_free_irq_routing(struct kvm *kvm) {}

#endif
929399ec807SAvi Kivity 
#ifdef CONFIG_HAVE_KVM_EVENTFD

void kvm_eventfd_init(struct kvm *kvm);
int kvm_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args);

#ifdef CONFIG_HAVE_KVM_IRQCHIP
int kvm_irqfd(struct kvm *kvm, struct kvm_irqfd *args);
void kvm_irqfd_release(struct kvm *kvm);
void kvm_irq_routing_update(struct kvm *, struct kvm_irq_routing_table *);
#else
/* Eventfd without an in-kernel irqchip: irqfd is unsupported. */
static inline int kvm_irqfd(struct kvm *kvm, struct kvm_irqfd *args)
{
	return -EINVAL;
}

static inline void kvm_irqfd_release(struct kvm *kvm) {}
#endif

#else

/* Eventfd support compiled out entirely: stub everything. */
static inline void kvm_eventfd_init(struct kvm *kvm) {}

static inline int kvm_irqfd(struct kvm *kvm, struct kvm_irqfd *args)
{
	return -EINVAL;
}

static inline void kvm_irqfd_release(struct kvm *kvm) {}

#ifdef CONFIG_HAVE_KVM_IRQCHIP
/* With an irqchip but no eventfd, routing updates reduce to a pointer swap. */
static inline void kvm_irq_routing_update(struct kvm *kvm,
					  struct kvm_irq_routing_table *irq_rt)
{
	rcu_assign_pointer(kvm->irq_routing, irq_rt);
}
#endif

static inline int kvm_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args)
{
	return -ENOSYS;
}

#endif /* CONFIG_HAVE_KVM_EVENTFD */
973721eecbfSGregory Haskins 
#ifdef CONFIG_KVM_APIC_ARCHITECTURE
/* True when @vcpu is its VM's bootstrap processor (BSP). */
static inline bool kvm_vcpu_is_bsp(struct kvm_vcpu *vcpu)
{
	return vcpu->kvm->bsp_vcpu_id == vcpu->vcpu_id;
}

bool kvm_vcpu_compatible(struct kvm_vcpu *vcpu);

#else

/* Without an APIC architecture there is no compatibility constraint. */
static inline bool kvm_vcpu_compatible(struct kvm_vcpu *vcpu) { return true; }

#endif
987bfd99ff5SAvi Kivity 
#ifdef CONFIG_KVM_DEVICE_ASSIGNMENT

long kvm_vm_ioctl_assigned_device(struct kvm *kvm, unsigned ioctl,
				  unsigned long arg);

void kvm_free_all_assigned_devices(struct kvm *kvm);

#else

/* Device assignment compiled out: reject its ioctls with -ENOTTY. */
static inline long kvm_vm_ioctl_assigned_device(struct kvm *kvm, unsigned ioctl,
						unsigned long arg)
{
	return -ENOTTY;
}

static inline void kvm_free_all_assigned_devices(struct kvm *kvm) {}

#endif
1006bfd99ff5SAvi Kivity 
/* Raise request bit @req on @vcpu; consumed by kvm_check_request(). */
static inline void kvm_make_request(int req, struct kvm_vcpu *vcpu)
{
	set_bit(req, &vcpu->requests);
}
1011a8eeb04aSAvi Kivity 
1012a8eeb04aSAvi Kivity static inline bool kvm_check_request(int req, struct kvm_vcpu *vcpu)
1013a8eeb04aSAvi Kivity {
10140719837cSAvi Kivity 	if (test_bit(req, &vcpu->requests)) {
10150719837cSAvi Kivity 		clear_bit(req, &vcpu->requests);
10160719837cSAvi Kivity 		return true;
10170719837cSAvi Kivity 	} else {
10180719837cSAvi Kivity 		return false;
10190719837cSAvi Kivity 	}
1020a8eeb04aSAvi Kivity }
1021a8eeb04aSAvi Kivity 
extern bool kvm_rebooting;

struct kvm_device_ops;

/* An in-kernel device instance driven through a kvm_device_ops vtable. */
struct kvm_device {
	struct kvm_device_ops *ops;
	struct kvm *kvm;
	void *private;		/* implementation-private state */
	struct list_head vm_node;	/* presumably links into the VM's device list — verify */
};

/* create, destroy, and name are mandatory */
struct kvm_device_ops {
	const char *name;
	int (*create)(struct kvm_device *dev, u32 type);

	/*
	 * Destroy is responsible for freeing dev.
	 *
	 * Destroy may be called before or after destructors are called
	 * on emulated I/O regions, depending on whether a reference is
	 * held by a vcpu or other kvm component that gets destroyed
	 * after the emulated I/O.
	 */
	void (*destroy)(struct kvm_device *dev);

	int (*set_attr)(struct kvm_device *dev, struct kvm_device_attr *attr);
	int (*get_attr)(struct kvm_device *dev, struct kvm_device_attr *attr);
	int (*has_attr)(struct kvm_device *dev, struct kvm_device_attr *attr);
	long (*ioctl)(struct kvm_device *dev, unsigned int ioctl,
		      unsigned long arg);
};

/* Reference counting and fd lookup for device instances. */
void kvm_device_get(struct kvm_device *dev);
void kvm_device_put(struct kvm_device *dev);
struct kvm_device *kvm_device_from_filp(struct file *filp);

extern struct kvm_device_ops kvm_mpic_ops;
extern struct kvm_device_ops kvm_xics_ops;
10615df554adSScott Wood 
#ifdef CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT

/* Bookkeeping flags consulted when picking a directed-yield candidate. */
static inline void kvm_vcpu_set_in_spin_loop(struct kvm_vcpu *vcpu, bool val)
{
	vcpu->spin_loop.in_spin_loop = val;
}
static inline void kvm_vcpu_set_dy_eligible(struct kvm_vcpu *vcpu, bool val)
{
	vcpu->spin_loop.dy_eligible = val;
}

#else /* !CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT */

static inline void kvm_vcpu_set_in_spin_loop(struct kvm_vcpu *vcpu, bool val)
{
}

static inline void kvm_vcpu_set_dy_eligible(struct kvm_vcpu *vcpu, bool val)
{
}

/* Without spin-loop tracking, every vcpu is an eligible yield target. */
static inline bool kvm_vcpu_eligible_for_directed_yield(struct kvm_vcpu *vcpu)
{
	return true;
}

#endif /* CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT */
1089bfd99ff5SAvi Kivity #endif
109009a6e1f4SMarcelo Tosatti 
1091