xref: /openbmc/linux/include/linux/kvm_host.h (revision 83f09228d068911ac8797ae8d6febef886698936)
1edf88417SAvi Kivity #ifndef __KVM_HOST_H
2edf88417SAvi Kivity #define __KVM_HOST_H
3edf88417SAvi Kivity 
4edf88417SAvi Kivity /*
5edf88417SAvi Kivity  * This work is licensed under the terms of the GNU GPL, version 2.  See
6edf88417SAvi Kivity  * the COPYING file in the top-level directory.
7edf88417SAvi Kivity  */
8edf88417SAvi Kivity 
9edf88417SAvi Kivity #include <linux/types.h>
10edf88417SAvi Kivity #include <linux/hardirq.h>
11edf88417SAvi Kivity #include <linux/list.h>
12edf88417SAvi Kivity #include <linux/mutex.h>
13edf88417SAvi Kivity #include <linux/spinlock.h>
14edf88417SAvi Kivity #include <linux/signal.h>
15edf88417SAvi Kivity #include <linux/sched.h>
16187f1882SPaul Gortmaker #include <linux/bug.h>
17edf88417SAvi Kivity #include <linux/mm.h>
18b297e672SEric B Munson #include <linux/mmu_notifier.h>
19edf88417SAvi Kivity #include <linux/preempt.h>
200937c48dSSheng Yang #include <linux/msi.h>
21d89f5effSJan Kiszka #include <linux/slab.h>
22bd2b53b2SMichael S. Tsirkin #include <linux/rcupdate.h>
23bd80158aSJan Kiszka #include <linux/ratelimit.h>
24*83f09228SXiao Guangrong #include <linux/err.h>
25edf88417SAvi Kivity #include <asm/signal.h>
26edf88417SAvi Kivity 
27edf88417SAvi Kivity #include <linux/kvm.h>
28edf88417SAvi Kivity #include <linux/kvm_para.h>
29edf88417SAvi Kivity 
30edf88417SAvi Kivity #include <linux/kvm_types.h>
31edf88417SAvi Kivity 
32edf88417SAvi Kivity #include <asm/kvm_host.h>
33edf88417SAvi Kivity 
34cef4dea0SAvi Kivity #ifndef KVM_MMIO_SIZE
35cef4dea0SAvi Kivity #define KVM_MMIO_SIZE 8
36cef4dea0SAvi Kivity #endif
37cef4dea0SAvi Kivity 
38edf88417SAvi Kivity /*
39f78146b0SAvi Kivity  * If we support unaligned MMIO, at most one fragment will be split into two:
40f78146b0SAvi Kivity  */
41f78146b0SAvi Kivity #ifdef KVM_UNALIGNED_MMIO
42f78146b0SAvi Kivity #  define KVM_EXTRA_MMIO_FRAGMENTS 1
43f78146b0SAvi Kivity #else
44f78146b0SAvi Kivity #  define KVM_EXTRA_MMIO_FRAGMENTS 0
45f78146b0SAvi Kivity #endif
46f78146b0SAvi Kivity 
47f78146b0SAvi Kivity #define KVM_USER_MMIO_SIZE 8
48f78146b0SAvi Kivity 
49f78146b0SAvi Kivity #define KVM_MAX_MMIO_FRAGMENTS \
50f78146b0SAvi Kivity 	(KVM_MMIO_SIZE / KVM_USER_MMIO_SIZE + KVM_EXTRA_MMIO_FRAGMENTS)
51f78146b0SAvi Kivity 
526c8ee57bSXiao Guangrong #define KVM_PFN_ERR_FAULT	(-EFAULT)
53e6c1502bSXiao Guangrong #define KVM_PFN_ERR_HWPOISON	(-EHWPOISON)
54950e9509SXiao Guangrong #define KVM_PFN_ERR_BAD		(-ENOENT)
556c8ee57bSXiao Guangrong 
56*83f09228SXiao Guangrong static inline int is_error_pfn(pfn_t pfn)
57*83f09228SXiao Guangrong {
58*83f09228SXiao Guangrong 	return IS_ERR_VALUE(pfn);
59*83f09228SXiao Guangrong }
60*83f09228SXiao Guangrong 
61*83f09228SXiao Guangrong static inline int is_noslot_pfn(pfn_t pfn)
62*83f09228SXiao Guangrong {
63*83f09228SXiao Guangrong 	return pfn == -ENOENT;
64*83f09228SXiao Guangrong }
65*83f09228SXiao Guangrong 
66*83f09228SXiao Guangrong static inline int is_invalid_pfn(pfn_t pfn)
67*83f09228SXiao Guangrong {
68*83f09228SXiao Guangrong 	return !is_noslot_pfn(pfn) && is_error_pfn(pfn);
69*83f09228SXiao Guangrong }
70*83f09228SXiao Guangrong 
71f78146b0SAvi Kivity /*
72edf88417SAvi Kivity  * vcpu->requests bit members
73edf88417SAvi Kivity  */
74edf88417SAvi Kivity #define KVM_REQ_TLB_FLUSH          0
752f52d58cSAvi Kivity #define KVM_REQ_MIGRATE_TIMER      1
76b209749fSAvi Kivity #define KVM_REQ_REPORT_TPR_ACCESS  2
772e53d63aSMarcelo Tosatti #define KVM_REQ_MMU_RELOAD         3
7871c4dfafSJoerg Roedel #define KVM_REQ_TRIPLE_FAULT       4
7906e05645SMarcelo Tosatti #define KVM_REQ_PENDING_TIMER      5
80d7690175SMarcelo Tosatti #define KVM_REQ_UNHALT             6
814731d4c7SMarcelo Tosatti #define KVM_REQ_MMU_SYNC           7
8234c238a1SZachary Amsden #define KVM_REQ_CLOCK_UPDATE       8
8332f88400SMarcelo Tosatti #define KVM_REQ_KICK               9
8402daab21SAvi Kivity #define KVM_REQ_DEACTIVATE_FPU    10
853842d135SAvi Kivity #define KVM_REQ_EVENT             11
86af585b92SGleb Natapov #define KVM_REQ_APF_HALT          12
87c9aaa895SGlauber Costa #define KVM_REQ_STEAL_UPDATE      13
887460fb4aSAvi Kivity #define KVM_REQ_NMI               14
89d6185f20SNadav Har'El #define KVM_REQ_IMMEDIATE_EXIT    15
90f5132b01SGleb Natapov #define KVM_REQ_PMU               16
91f5132b01SGleb Natapov #define KVM_REQ_PMI               17
92edf88417SAvi Kivity 
935550af4dSSheng Yang #define KVM_USERSPACE_IRQ_SOURCE_ID	0
945550af4dSSheng Yang 
956c474694SMichael S. Tsirkin struct kvm;
96edf88417SAvi Kivity struct kvm_vcpu;
97edf88417SAvi Kivity extern struct kmem_cache *kvm_vcpu_cache;
98edf88417SAvi Kivity 
99743eeb0bSSasha Levin struct kvm_io_range {
100743eeb0bSSasha Levin 	gpa_t addr;
101743eeb0bSSasha Levin 	int len;
102743eeb0bSSasha Levin 	struct kvm_io_device *dev;
103743eeb0bSSasha Levin };
104743eeb0bSSasha Levin 
105786a9f88SAmos Kong #define NR_IOBUS_DEVS 1000
106a1300716SAmos Kong 
107edf88417SAvi Kivity struct kvm_io_bus {
108edf88417SAvi Kivity 	int                   dev_count;
109a1300716SAmos Kong 	struct kvm_io_range range[];
110edf88417SAvi Kivity };
111edf88417SAvi Kivity 
112e93f8a0fSMarcelo Tosatti enum kvm_bus {
113e93f8a0fSMarcelo Tosatti 	KVM_MMIO_BUS,
114e93f8a0fSMarcelo Tosatti 	KVM_PIO_BUS,
115e93f8a0fSMarcelo Tosatti 	KVM_NR_BUSES
116e93f8a0fSMarcelo Tosatti };
117e93f8a0fSMarcelo Tosatti 
118e93f8a0fSMarcelo Tosatti int kvm_io_bus_write(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr,
119e93f8a0fSMarcelo Tosatti 		     int len, const void *val);
120e93f8a0fSMarcelo Tosatti int kvm_io_bus_read(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr, int len,
121bda9020eSMichael S. Tsirkin 		    void *val);
122743eeb0bSSasha Levin int kvm_io_bus_register_dev(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr,
123743eeb0bSSasha Levin 			    int len, struct kvm_io_device *dev);
124e93f8a0fSMarcelo Tosatti int kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx,
125edf88417SAvi Kivity 			      struct kvm_io_device *dev);
126edf88417SAvi Kivity 
127af585b92SGleb Natapov #ifdef CONFIG_KVM_ASYNC_PF
128af585b92SGleb Natapov struct kvm_async_pf {
129af585b92SGleb Natapov 	struct work_struct work;
130af585b92SGleb Natapov 	struct list_head link;
131af585b92SGleb Natapov 	struct list_head queue;
132af585b92SGleb Natapov 	struct kvm_vcpu *vcpu;
133af585b92SGleb Natapov 	struct mm_struct *mm;
134af585b92SGleb Natapov 	gva_t gva;
135af585b92SGleb Natapov 	unsigned long addr;
136af585b92SGleb Natapov 	struct kvm_arch_async_pf arch;
137af585b92SGleb Natapov 	struct page *page;
138af585b92SGleb Natapov 	bool done;
139af585b92SGleb Natapov };
140af585b92SGleb Natapov 
141af585b92SGleb Natapov void kvm_clear_async_pf_completion_queue(struct kvm_vcpu *vcpu);
142af585b92SGleb Natapov void kvm_check_async_pf_completion(struct kvm_vcpu *vcpu);
143af585b92SGleb Natapov int kvm_setup_async_pf(struct kvm_vcpu *vcpu, gva_t gva, gfn_t gfn,
144af585b92SGleb Natapov 		       struct kvm_arch_async_pf *arch);
145344d9588SGleb Natapov int kvm_async_pf_wakeup_all(struct kvm_vcpu *vcpu);
146af585b92SGleb Natapov #endif
147af585b92SGleb Natapov 
1486b7e2d09SXiao Guangrong enum {
1496b7e2d09SXiao Guangrong 	OUTSIDE_GUEST_MODE,
1506b7e2d09SXiao Guangrong 	IN_GUEST_MODE,
151c142786cSAvi Kivity 	EXITING_GUEST_MODE,
152c142786cSAvi Kivity 	READING_SHADOW_PAGE_TABLES,
1536b7e2d09SXiao Guangrong };
1546b7e2d09SXiao Guangrong 
155f78146b0SAvi Kivity /*
156f78146b0SAvi Kivity  * Sometimes a large or cross-page mmio needs to be broken up into separate
157f78146b0SAvi Kivity  * exits for userspace servicing.
158f78146b0SAvi Kivity  */
struct kvm_mmio_fragment {
	gpa_t gpa;	/* guest physical address of this piece */
	void *data;	/* kernel buffer backing the piece */
	unsigned len;	/* length of the piece in bytes */
};
164f78146b0SAvi Kivity 
165edf88417SAvi Kivity struct kvm_vcpu {
166edf88417SAvi Kivity 	struct kvm *kvm;
16731bb117eSHollis Blanchard #ifdef CONFIG_PREEMPT_NOTIFIERS
168edf88417SAvi Kivity 	struct preempt_notifier preempt_notifier;
16931bb117eSHollis Blanchard #endif
170edf88417SAvi Kivity 	int cpu;
1716b7e2d09SXiao Guangrong 	int vcpu_id;
1726b7e2d09SXiao Guangrong 	int srcu_idx;
1736b7e2d09SXiao Guangrong 	int mode;
174edf88417SAvi Kivity 	unsigned long requests;
175d0bfb940SJan Kiszka 	unsigned long guest_debug;
1766b7e2d09SXiao Guangrong 
1776b7e2d09SXiao Guangrong 	struct mutex mutex;
1786b7e2d09SXiao Guangrong 	struct kvm_run *run;
179f656ce01SMarcelo Tosatti 
180edf88417SAvi Kivity 	int fpu_active;
1812acf923eSDexuan Cui 	int guest_fpu_loaded, guest_xcr0_loaded;
182edf88417SAvi Kivity 	wait_queue_head_t wq;
18334bb10b7SRik van Riel 	struct pid *pid;
184edf88417SAvi Kivity 	int sigset_active;
185edf88417SAvi Kivity 	sigset_t sigset;
186edf88417SAvi Kivity 	struct kvm_vcpu_stat stat;
187edf88417SAvi Kivity 
188edf88417SAvi Kivity #ifdef CONFIG_HAS_IOMEM
189edf88417SAvi Kivity 	int mmio_needed;
190edf88417SAvi Kivity 	int mmio_read_completed;
191edf88417SAvi Kivity 	int mmio_is_write;
192f78146b0SAvi Kivity 	int mmio_cur_fragment;
193f78146b0SAvi Kivity 	int mmio_nr_fragments;
194f78146b0SAvi Kivity 	struct kvm_mmio_fragment mmio_fragments[KVM_MAX_MMIO_FRAGMENTS];
195edf88417SAvi Kivity #endif
196edf88417SAvi Kivity 
197af585b92SGleb Natapov #ifdef CONFIG_KVM_ASYNC_PF
198af585b92SGleb Natapov 	struct {
199af585b92SGleb Natapov 		u32 queued;
200af585b92SGleb Natapov 		struct list_head queue;
201af585b92SGleb Natapov 		struct list_head done;
202af585b92SGleb Natapov 		spinlock_t lock;
203af585b92SGleb Natapov 	} async_pf;
204af585b92SGleb Natapov #endif
205af585b92SGleb Natapov 
2064c088493SRaghavendra K T #ifdef CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT
2074c088493SRaghavendra K T 	/*
2084c088493SRaghavendra K T 	 * Cpu relax intercept or pause loop exit optimization
2094c088493SRaghavendra K T 	 * in_spin_loop: set when a vcpu does a pause loop exit
2104c088493SRaghavendra K T 	 *  or cpu relax intercepted.
2114c088493SRaghavendra K T 	 * dy_eligible: indicates whether vcpu is eligible for directed yield.
2124c088493SRaghavendra K T 	 */
2134c088493SRaghavendra K T 	struct {
2144c088493SRaghavendra K T 		bool in_spin_loop;
2154c088493SRaghavendra K T 		bool dy_eligible;
2164c088493SRaghavendra K T 	} spin_loop;
2174c088493SRaghavendra K T #endif
218edf88417SAvi Kivity 	struct kvm_vcpu_arch arch;
219edf88417SAvi Kivity };
220edf88417SAvi Kivity 
/*
 * Atomically flip vcpu->mode from IN_GUEST_MODE to EXITING_GUEST_MODE.
 * Returns the mode observed before the exchange; the store only takes
 * effect when the vcpu really was IN_GUEST_MODE, so the caller can tell
 * from the return value whether it is the one that initiated the exit.
 */
static inline int kvm_vcpu_exiting_guest_mode(struct kvm_vcpu *vcpu)
{
	return cmpxchg(&vcpu->mode, IN_GUEST_MODE, EXITING_GUEST_MODE);
}
2256b7e2d09SXiao Guangrong 
226660c22c4STakuya Yoshikawa /*
227660c22c4STakuya Yoshikawa  * Some of the bitops functions do not support too long bitmaps.
228660c22c4STakuya Yoshikawa  * This number must be determined not to exceed such limits.
229660c22c4STakuya Yoshikawa  */
230660c22c4STakuya Yoshikawa #define KVM_MEM_MAX_NR_PAGES ((1UL << 31) - 1)
231660c22c4STakuya Yoshikawa 
232edf88417SAvi Kivity struct kvm_memory_slot {
233edf88417SAvi Kivity 	gfn_t base_gfn;
234edf88417SAvi Kivity 	unsigned long npages;
235edf88417SAvi Kivity 	unsigned long flags;
236edf88417SAvi Kivity 	unsigned long *dirty_bitmap;
237db3fe4ebSTakuya Yoshikawa 	struct kvm_arch_memory_slot arch;
238edf88417SAvi Kivity 	unsigned long userspace_addr;
239edf88417SAvi Kivity 	int user_alloc;
240e36d96f7SAvi Kivity 	int id;
241edf88417SAvi Kivity };
242edf88417SAvi Kivity 
24387bf6e7dSTakuya Yoshikawa static inline unsigned long kvm_dirty_bitmap_bytes(struct kvm_memory_slot *memslot)
24487bf6e7dSTakuya Yoshikawa {
24587bf6e7dSTakuya Yoshikawa 	return ALIGN(memslot->npages, BITS_PER_LONG) / 8;
24687bf6e7dSTakuya Yoshikawa }
24787bf6e7dSTakuya Yoshikawa 
248399ec807SAvi Kivity struct kvm_kernel_irq_routing_entry {
249399ec807SAvi Kivity 	u32 gsi;
2505116d8f6SMichael S. Tsirkin 	u32 type;
2514925663aSGleb Natapov 	int (*set)(struct kvm_kernel_irq_routing_entry *e,
2521a6e4a8cSGleb Natapov 		   struct kvm *kvm, int irq_source_id, int level);
253399ec807SAvi Kivity 	union {
254399ec807SAvi Kivity 		struct {
255399ec807SAvi Kivity 			unsigned irqchip;
256399ec807SAvi Kivity 			unsigned pin;
257399ec807SAvi Kivity 		} irqchip;
25879950e10SSheng Yang 		struct msi_msg msi;
259399ec807SAvi Kivity 	};
26046e624b9SGleb Natapov 	struct hlist_node link;
26146e624b9SGleb Natapov };
26246e624b9SGleb Natapov 
2633e71f88bSGleb Natapov #ifdef __KVM_HAVE_IOAPIC
2643e71f88bSGleb Natapov 
26546e624b9SGleb Natapov struct kvm_irq_routing_table {
2663e71f88bSGleb Natapov 	int chip[KVM_NR_IRQCHIPS][KVM_IOAPIC_NUM_PINS];
26746e624b9SGleb Natapov 	struct kvm_kernel_irq_routing_entry *rt_entries;
26846e624b9SGleb Natapov 	u32 nr_rt_entries;
26946e624b9SGleb Natapov 	/*
27046e624b9SGleb Natapov 	 * Array indexed by gsi. Each entry contains list of irq chips
27146e624b9SGleb Natapov 	 * the gsi is connected to.
27246e624b9SGleb Natapov 	 */
27346e624b9SGleb Natapov 	struct hlist_head map[0];
274399ec807SAvi Kivity };
275399ec807SAvi Kivity 
2763e71f88bSGleb Natapov #else
2773e71f88bSGleb Natapov 
2783e71f88bSGleb Natapov struct kvm_irq_routing_table {};
2793e71f88bSGleb Natapov 
2803e71f88bSGleb Natapov #endif
2813e71f88bSGleb Natapov 
28293a5cef0SXiao Guangrong #ifndef KVM_MEM_SLOTS_NUM
28393a5cef0SXiao Guangrong #define KVM_MEM_SLOTS_NUM (KVM_MEMORY_SLOTS + KVM_PRIVATE_MEM_SLOTS)
28493a5cef0SXiao Guangrong #endif
28593a5cef0SXiao Guangrong 
286bf3e05bcSXiao Guangrong /*
287bf3e05bcSXiao Guangrong  * Note:
288bf3e05bcSXiao Guangrong  * memslots are not sorted by id anymore, please use id_to_memslot()
289bf3e05bcSXiao Guangrong  * to get the memslot by its id.
290bf3e05bcSXiao Guangrong  */
29146a26bf5SMarcelo Tosatti struct kvm_memslots {
29249c7754cSGleb Natapov 	u64 generation;
29393a5cef0SXiao Guangrong 	struct kvm_memory_slot memslots[KVM_MEM_SLOTS_NUM];
294f85e2cb5SXiao Guangrong 	/* The mapping table from slot id to the index in memslots[]. */
295f85e2cb5SXiao Guangrong 	int id_to_index[KVM_MEM_SLOTS_NUM];
29646a26bf5SMarcelo Tosatti };
29746a26bf5SMarcelo Tosatti 
298edf88417SAvi Kivity struct kvm {
299aaee2c94SMarcelo Tosatti 	spinlock_t mmu_lock;
30079fac95eSMarcelo Tosatti 	struct mutex slots_lock;
301edf88417SAvi Kivity 	struct mm_struct *mm; /* userspace tied to this vm */
30246a26bf5SMarcelo Tosatti 	struct kvm_memslots *memslots;
303bc6678a3SMarcelo Tosatti 	struct srcu_struct srcu;
30473880c80SGleb Natapov #ifdef CONFIG_KVM_APIC_ARCHITECTURE
30573880c80SGleb Natapov 	u32 bsp_vcpu_id;
30673880c80SGleb Natapov #endif
307edf88417SAvi Kivity 	struct kvm_vcpu *vcpus[KVM_MAX_VCPUS];
30873880c80SGleb Natapov 	atomic_t online_vcpus;
309217ece61SRik van Riel 	int last_boosted_vcpu;
310edf88417SAvi Kivity 	struct list_head vm_list;
31160eead79SMarcelo Tosatti 	struct mutex lock;
312e93f8a0fSMarcelo Tosatti 	struct kvm_io_bus *buses[KVM_NR_BUSES];
313721eecbfSGregory Haskins #ifdef CONFIG_HAVE_KVM_EVENTFD
314721eecbfSGregory Haskins 	struct {
315721eecbfSGregory Haskins 		spinlock_t        lock;
316721eecbfSGregory Haskins 		struct list_head  items;
317721eecbfSGregory Haskins 	} irqfds;
318d34e6b17SGregory Haskins 	struct list_head ioeventfds;
319721eecbfSGregory Haskins #endif
320edf88417SAvi Kivity 	struct kvm_vm_stat stat;
321edf88417SAvi Kivity 	struct kvm_arch arch;
322d39f13b0SIzik Eidus 	atomic_t users_count;
3235f94c174SLaurent Vivier #ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
3245f94c174SLaurent Vivier 	struct kvm_coalesced_mmio_ring *coalesced_mmio_ring;
3252b3c246aSSasha Levin 	spinlock_t ring_lock;
3262b3c246aSSasha Levin 	struct list_head coalesced_zones;
3275f94c174SLaurent Vivier #endif
328e930bffeSAndrea Arcangeli 
32960eead79SMarcelo Tosatti 	struct mutex irq_lock;
33075858a84SAvi Kivity #ifdef CONFIG_HAVE_KVM_IRQCHIP
331bd2b53b2SMichael S. Tsirkin 	/*
332bd2b53b2SMichael S. Tsirkin 	 * Update side is protected by irq_lock and,
333bd2b53b2SMichael S. Tsirkin 	 * if configured, irqfds.lock.
334bd2b53b2SMichael S. Tsirkin 	 */
3354b6a2872SArnd Bergmann 	struct kvm_irq_routing_table __rcu *irq_routing;
33675858a84SAvi Kivity 	struct hlist_head mask_notifier_list;
337136bdfeeSGleb Natapov 	struct hlist_head irq_ack_notifier_list;
33875858a84SAvi Kivity #endif
33975858a84SAvi Kivity 
34036c1ed82SMarc Zyngier #if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER)
341e930bffeSAndrea Arcangeli 	struct mmu_notifier mmu_notifier;
342e930bffeSAndrea Arcangeli 	unsigned long mmu_notifier_seq;
343e930bffeSAndrea Arcangeli 	long mmu_notifier_count;
344e930bffeSAndrea Arcangeli #endif
3455c663a15SAvi Kivity 	long tlbs_dirty;
346edf88417SAvi Kivity };
347edf88417SAvi Kivity 
348a737f256SChristoffer Dall #define kvm_err(fmt, ...) \
349a737f256SChristoffer Dall 	pr_err("kvm [%i]: " fmt, task_pid_nr(current), ## __VA_ARGS__)
350a737f256SChristoffer Dall #define kvm_info(fmt, ...) \
351a737f256SChristoffer Dall 	pr_info("kvm [%i]: " fmt, task_pid_nr(current), ## __VA_ARGS__)
352a737f256SChristoffer Dall #define kvm_debug(fmt, ...) \
353a737f256SChristoffer Dall 	pr_debug("kvm [%i]: " fmt, task_pid_nr(current), ## __VA_ARGS__)
354a737f256SChristoffer Dall #define kvm_pr_unimpl(fmt, ...) \
355a737f256SChristoffer Dall 	pr_err_ratelimited("kvm [%i]: " fmt, \
356a737f256SChristoffer Dall 			   task_tgid_nr(current), ## __VA_ARGS__)
357edf88417SAvi Kivity 
358a737f256SChristoffer Dall /* The guest did something we don't support. */
359a737f256SChristoffer Dall #define vcpu_unimpl(vcpu, fmt, ...)					\
360a737f256SChristoffer Dall 	kvm_pr_unimpl("vcpu%i " fmt, (vcpu)->vcpu_id, ## __VA_ARGS__)
361edf88417SAvi Kivity 
static inline struct kvm_vcpu *kvm_get_vcpu(struct kvm *kvm, int i)
{
	/*
	 * Order the read of vcpus[i] after the caller's read of
	 * online_vcpus, so a freshly published vcpu pointer is fully
	 * visible before we dereference it.  NOTE(review): the pairing
	 * write barrier lives in the vcpu creation path outside this
	 * file — confirm against kvm_vm_ioctl_create_vcpu().
	 */
	smp_rmb();
	return kvm->vcpus[i];
}
367988a2caeSGleb Natapov 
368988a2caeSGleb Natapov #define kvm_for_each_vcpu(idx, vcpup, kvm) \
369b42fc3cbSJeff Mahoney 	for (idx = 0; \
370b42fc3cbSJeff Mahoney 	     idx < atomic_read(&kvm->online_vcpus) && \
371b42fc3cbSJeff Mahoney 	     (vcpup = kvm_get_vcpu(kvm, idx)) != NULL; \
372b42fc3cbSJeff Mahoney 	     idx++)
373988a2caeSGleb Natapov 
374be6ba0f0SXiao Guangrong #define kvm_for_each_memslot(memslot, slots)	\
375be6ba0f0SXiao Guangrong 	for (memslot = &slots->memslots[0];	\
376bf3e05bcSXiao Guangrong 	      memslot < slots->memslots + KVM_MEM_SLOTS_NUM && memslot->npages;\
377bf3e05bcSXiao Guangrong 		memslot++)
378be6ba0f0SXiao Guangrong 
379edf88417SAvi Kivity int kvm_vcpu_init(struct kvm_vcpu *vcpu, struct kvm *kvm, unsigned id);
380edf88417SAvi Kivity void kvm_vcpu_uninit(struct kvm_vcpu *vcpu);
381edf88417SAvi Kivity 
382edf88417SAvi Kivity void vcpu_load(struct kvm_vcpu *vcpu);
383edf88417SAvi Kivity void vcpu_put(struct kvm_vcpu *vcpu);
384edf88417SAvi Kivity 
3850ee75beaSAvi Kivity int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
386edf88417SAvi Kivity 		  struct module *module);
387edf88417SAvi Kivity void kvm_exit(void);
388edf88417SAvi Kivity 
389d39f13b0SIzik Eidus void kvm_get_kvm(struct kvm *kvm);
390d39f13b0SIzik Eidus void kvm_put_kvm(struct kvm *kvm);
391be593d62SXiao Guangrong void update_memslots(struct kvm_memslots *slots, struct kvm_memory_slot *new);
392d39f13b0SIzik Eidus 
/*
 * Dereference kvm->memslots.  Legal while either the kvm->srcu
 * read-side lock or kvm->slots_lock is held; rcu_dereference_check()
 * lets lockdep verify that at least one of the two conditions holds on
 * every access.
 */
static inline struct kvm_memslots *kvm_memslots(struct kvm *kvm)
{
	return rcu_dereference_check(kvm->memslots,
			srcu_read_lock_held(&kvm->srcu)
			|| lockdep_is_held(&kvm->slots_lock));
}
39990d83dc3SLai Jiangshan 
40028a37544SXiao Guangrong static inline struct kvm_memory_slot *
40128a37544SXiao Guangrong id_to_memslot(struct kvm_memslots *slots, int id)
40228a37544SXiao Guangrong {
403f85e2cb5SXiao Guangrong 	int index = slots->id_to_index[id];
404f85e2cb5SXiao Guangrong 	struct kvm_memory_slot *slot;
405bf3e05bcSXiao Guangrong 
406f85e2cb5SXiao Guangrong 	slot = &slots->memslots[index];
407bf3e05bcSXiao Guangrong 
408f85e2cb5SXiao Guangrong 	WARN_ON(slot->id != id);
409f85e2cb5SXiao Guangrong 	return slot;
41028a37544SXiao Guangrong }
41128a37544SXiao Guangrong 
412edf88417SAvi Kivity extern struct page *bad_page;
413edf88417SAvi Kivity 
414edf88417SAvi Kivity int is_error_page(struct page *page);
415edf88417SAvi Kivity int kvm_is_error_hva(unsigned long addr);
416edf88417SAvi Kivity int kvm_set_memory_region(struct kvm *kvm,
417edf88417SAvi Kivity 			  struct kvm_userspace_memory_region *mem,
418edf88417SAvi Kivity 			  int user_alloc);
419edf88417SAvi Kivity int __kvm_set_memory_region(struct kvm *kvm,
420edf88417SAvi Kivity 			    struct kvm_userspace_memory_region *mem,
421edf88417SAvi Kivity 			    int user_alloc);
422db3fe4ebSTakuya Yoshikawa void kvm_arch_free_memslot(struct kvm_memory_slot *free,
423db3fe4ebSTakuya Yoshikawa 			   struct kvm_memory_slot *dont);
424db3fe4ebSTakuya Yoshikawa int kvm_arch_create_memslot(struct kvm_memory_slot *slot, unsigned long npages);
425f7784b8eSMarcelo Tosatti int kvm_arch_prepare_memory_region(struct kvm *kvm,
426f7784b8eSMarcelo Tosatti 				struct kvm_memory_slot *memslot,
427f7784b8eSMarcelo Tosatti 				struct kvm_memory_slot old,
428f7784b8eSMarcelo Tosatti 				struct kvm_userspace_memory_region *mem,
429f7784b8eSMarcelo Tosatti 				int user_alloc);
430f7784b8eSMarcelo Tosatti void kvm_arch_commit_memory_region(struct kvm *kvm,
431edf88417SAvi Kivity 				struct kvm_userspace_memory_region *mem,
432edf88417SAvi Kivity 				struct kvm_memory_slot old,
433edf88417SAvi Kivity 				int user_alloc);
434db3fe4ebSTakuya Yoshikawa bool kvm_largepages_enabled(void);
43554dee993SMarcelo Tosatti void kvm_disable_largepages(void);
43634d4cb8fSMarcelo Tosatti void kvm_arch_flush_shadow(struct kvm *kvm);
437a983fb23SMarcelo Tosatti 
43848987781SXiao Guangrong int gfn_to_page_many_atomic(struct kvm *kvm, gfn_t gfn, struct page **pages,
43948987781SXiao Guangrong 			    int nr_pages);
44048987781SXiao Guangrong 
441a2766325SXiao Guangrong struct page *get_bad_page(void);
442edf88417SAvi Kivity struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn);
44305da4558SMarcelo Tosatti unsigned long gfn_to_hva(struct kvm *kvm, gfn_t gfn);
444edf88417SAvi Kivity void kvm_release_page_clean(struct page *page);
445edf88417SAvi Kivity void kvm_release_page_dirty(struct page *page);
44635149e21SAnthony Liguori void kvm_set_page_dirty(struct page *page);
44735149e21SAnthony Liguori void kvm_set_page_accessed(struct page *page);
44835149e21SAnthony Liguori 
449d5661048SXiao Guangrong pfn_t hva_to_pfn_atomic(unsigned long addr);
450365fb3fdSXiao Guangrong pfn_t gfn_to_pfn_atomic(struct kvm *kvm, gfn_t gfn);
451612819c3SMarcelo Tosatti pfn_t gfn_to_pfn_async(struct kvm *kvm, gfn_t gfn, bool *async,
452612819c3SMarcelo Tosatti 		       bool write_fault, bool *writable);
45335149e21SAnthony Liguori pfn_t gfn_to_pfn(struct kvm *kvm, gfn_t gfn);
454612819c3SMarcelo Tosatti pfn_t gfn_to_pfn_prot(struct kvm *kvm, gfn_t gfn, bool write_fault,
455612819c3SMarcelo Tosatti 		      bool *writable);
456d5661048SXiao Guangrong pfn_t gfn_to_pfn_memslot(struct kvm_memory_slot *slot, gfn_t gfn);
45735149e21SAnthony Liguori void kvm_release_pfn_dirty(pfn_t);
45835149e21SAnthony Liguori void kvm_release_pfn_clean(pfn_t pfn);
45935149e21SAnthony Liguori void kvm_set_pfn_dirty(pfn_t pfn);
46035149e21SAnthony Liguori void kvm_set_pfn_accessed(pfn_t pfn);
46135149e21SAnthony Liguori void kvm_get_pfn(pfn_t pfn);
46235149e21SAnthony Liguori 
463edf88417SAvi Kivity int kvm_read_guest_page(struct kvm *kvm, gfn_t gfn, void *data, int offset,
464edf88417SAvi Kivity 			int len);
4657ec54588SMarcelo Tosatti int kvm_read_guest_atomic(struct kvm *kvm, gpa_t gpa, void *data,
4667ec54588SMarcelo Tosatti 			  unsigned long len);
467edf88417SAvi Kivity int kvm_read_guest(struct kvm *kvm, gpa_t gpa, void *data, unsigned long len);
468e03b644fSGleb Natapov int kvm_read_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
469e03b644fSGleb Natapov 			   void *data, unsigned long len);
470edf88417SAvi Kivity int kvm_write_guest_page(struct kvm *kvm, gfn_t gfn, const void *data,
471edf88417SAvi Kivity 			 int offset, int len);
472edf88417SAvi Kivity int kvm_write_guest(struct kvm *kvm, gpa_t gpa, const void *data,
473edf88417SAvi Kivity 		    unsigned long len);
47449c7754cSGleb Natapov int kvm_write_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
47549c7754cSGleb Natapov 			   void *data, unsigned long len);
47649c7754cSGleb Natapov int kvm_gfn_to_hva_cache_init(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
47749c7754cSGleb Natapov 			      gpa_t gpa);
478edf88417SAvi Kivity int kvm_clear_guest_page(struct kvm *kvm, gfn_t gfn, int offset, int len);
479edf88417SAvi Kivity int kvm_clear_guest(struct kvm *kvm, gpa_t gpa, unsigned long len);
480edf88417SAvi Kivity struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn);
481edf88417SAvi Kivity int kvm_is_visible_gfn(struct kvm *kvm, gfn_t gfn);
4828f0b1ab6SJoerg Roedel unsigned long kvm_host_page_size(struct kvm *kvm, gfn_t gfn);
483edf88417SAvi Kivity void mark_page_dirty(struct kvm *kvm, gfn_t gfn);
48449c7754cSGleb Natapov void mark_page_dirty_in_slot(struct kvm *kvm, struct kvm_memory_slot *memslot,
48549c7754cSGleb Natapov 			     gfn_t gfn);
486edf88417SAvi Kivity 
487edf88417SAvi Kivity void kvm_vcpu_block(struct kvm_vcpu *vcpu);
488b6d33834SChristoffer Dall void kvm_vcpu_kick(struct kvm_vcpu *vcpu);
48941628d33SKonstantin Weitz bool kvm_vcpu_yield_to(struct kvm_vcpu *target);
490d255f4f2SZhai, Edwin void kvm_vcpu_on_spin(struct kvm_vcpu *vcpu);
491edf88417SAvi Kivity void kvm_resched(struct kvm_vcpu *vcpu);
492edf88417SAvi Kivity void kvm_load_guest_fpu(struct kvm_vcpu *vcpu);
493edf88417SAvi Kivity void kvm_put_guest_fpu(struct kvm_vcpu *vcpu);
494a4ee1ca4SXiao Guangrong 
495edf88417SAvi Kivity void kvm_flush_remote_tlbs(struct kvm *kvm);
4962e53d63aSMarcelo Tosatti void kvm_reload_remote_mmus(struct kvm *kvm);
497edf88417SAvi Kivity 
498edf88417SAvi Kivity long kvm_arch_dev_ioctl(struct file *filp,
499edf88417SAvi Kivity 			unsigned int ioctl, unsigned long arg);
500edf88417SAvi Kivity long kvm_arch_vcpu_ioctl(struct file *filp,
501edf88417SAvi Kivity 			 unsigned int ioctl, unsigned long arg);
5025b1c1493SCarsten Otte int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf);
503edf88417SAvi Kivity 
504edf88417SAvi Kivity int kvm_dev_ioctl_check_extension(long ext);
505edf88417SAvi Kivity 
506edf88417SAvi Kivity int kvm_get_dirty_log(struct kvm *kvm,
507edf88417SAvi Kivity 			struct kvm_dirty_log *log, int *is_dirty);
508edf88417SAvi Kivity int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
509edf88417SAvi Kivity 				struct kvm_dirty_log *log);
510edf88417SAvi Kivity 
511edf88417SAvi Kivity int kvm_vm_ioctl_set_memory_region(struct kvm *kvm,
512edf88417SAvi Kivity 				   struct
513edf88417SAvi Kivity 				   kvm_userspace_memory_region *mem,
514edf88417SAvi Kivity 				   int user_alloc);
51523d43cf9SChristoffer Dall int kvm_vm_ioctl_irq_line(struct kvm *kvm, struct kvm_irq_level *irq_level);
516edf88417SAvi Kivity long kvm_arch_vm_ioctl(struct file *filp,
517edf88417SAvi Kivity 		       unsigned int ioctl, unsigned long arg);
518edf88417SAvi Kivity 
519edf88417SAvi Kivity int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu);
520edf88417SAvi Kivity int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu);
521edf88417SAvi Kivity 
522edf88417SAvi Kivity int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
523edf88417SAvi Kivity 				    struct kvm_translation *tr);
524edf88417SAvi Kivity 
525edf88417SAvi Kivity int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs);
526edf88417SAvi Kivity int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs);
527edf88417SAvi Kivity int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
528edf88417SAvi Kivity 				  struct kvm_sregs *sregs);
529edf88417SAvi Kivity int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
530edf88417SAvi Kivity 				  struct kvm_sregs *sregs);
53162d9f0dbSMarcelo Tosatti int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
53262d9f0dbSMarcelo Tosatti 				    struct kvm_mp_state *mp_state);
53362d9f0dbSMarcelo Tosatti int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
53462d9f0dbSMarcelo Tosatti 				    struct kvm_mp_state *mp_state);
535d0bfb940SJan Kiszka int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
536d0bfb940SJan Kiszka 					struct kvm_guest_debug *dbg);
537edf88417SAvi Kivity int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run);
538edf88417SAvi Kivity 
539edf88417SAvi Kivity int kvm_arch_init(void *opaque);
540edf88417SAvi Kivity void kvm_arch_exit(void);
541edf88417SAvi Kivity 
542edf88417SAvi Kivity int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu);
543edf88417SAvi Kivity void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu);
544edf88417SAvi Kivity 
545edf88417SAvi Kivity void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu);
546edf88417SAvi Kivity void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu);
547edf88417SAvi Kivity void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu);
548edf88417SAvi Kivity struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id);
549edf88417SAvi Kivity int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu);
550edf88417SAvi Kivity void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu);
551edf88417SAvi Kivity 
552edf88417SAvi Kivity int kvm_arch_vcpu_reset(struct kvm_vcpu *vcpu);
55310474ae8SAlexander Graf int kvm_arch_hardware_enable(void *garbage);
554edf88417SAvi Kivity void kvm_arch_hardware_disable(void *garbage);
555edf88417SAvi Kivity int kvm_arch_hardware_setup(void);
556edf88417SAvi Kivity void kvm_arch_hardware_unsetup(void);
557edf88417SAvi Kivity void kvm_arch_check_processor_compat(void *rtn);
558edf88417SAvi Kivity int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu);
559b6d33834SChristoffer Dall int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu);
560edf88417SAvi Kivity 
561edf88417SAvi Kivity void kvm_free_physmem(struct kvm *kvm);
562edf88417SAvi Kivity 
563c1a7b32aSTakuya Yoshikawa void *kvm_kvzalloc(unsigned long size);
564c1a7b32aSTakuya Yoshikawa void kvm_kvfree(const void *addr);
565c1a7b32aSTakuya Yoshikawa 
566d89f5effSJan Kiszka #ifndef __KVM_HAVE_ARCH_VM_ALLOC
567d89f5effSJan Kiszka static inline struct kvm *kvm_arch_alloc_vm(void)
568d89f5effSJan Kiszka {
569d89f5effSJan Kiszka 	return kzalloc(sizeof(struct kvm), GFP_KERNEL);
570d89f5effSJan Kiszka }
571d89f5effSJan Kiszka 
572d89f5effSJan Kiszka static inline void kvm_arch_free_vm(struct kvm *kvm)
573d89f5effSJan Kiszka {
574d89f5effSJan Kiszka 	kfree(kvm);
575d89f5effSJan Kiszka }
576d89f5effSJan Kiszka #endif
577d89f5effSJan Kiszka 
/*
 * Return the wait queue a vcpu blocks on.  Architectures that define
 * __KVM_HAVE_ARCH_WQP supply their own queue pointer via vcpu->arch;
 * everyone else uses the generic vcpu->wq embedded in struct kvm_vcpu.
 */
static inline wait_queue_head_t *kvm_arch_vcpu_wq(struct kvm_vcpu *vcpu)
{
#ifdef __KVM_HAVE_ARCH_WQP
	return vcpu->arch.wqp;
#else
	return &vcpu->wq;
#endif
}
586b6d33834SChristoffer Dall 
587e08b9637SCarsten Otte int kvm_arch_init_vm(struct kvm *kvm, unsigned long type);
588edf88417SAvi Kivity void kvm_arch_destroy_vm(struct kvm *kvm);
5898a98f664SXiantao Zhang void kvm_free_all_assigned_devices(struct kvm *kvm);
590ad8ba2cdSSheng Yang void kvm_arch_sync_events(struct kvm *kvm);
591edf88417SAvi Kivity 
5923d80840dSMarcelo Tosatti int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu);
5935736199aSZhang Xiantao void kvm_vcpu_kick(struct kvm_vcpu *vcpu);
594edf88417SAvi Kivity 
595a2766325SXiao Guangrong bool kvm_is_mmio_pfn(pfn_t pfn);
596c77fb9dcSXiantao Zhang 
/*
 * Notifier invoked when the guest acknowledges an interrupt on @gsi;
 * linked into a per-VM hlist (see kvm_register_irq_ack_notifier below).
 */
struct kvm_irq_ack_notifier {
	struct hlist_node link;
	unsigned gsi;				/* GSI being watched */
	void (*irq_acked)(struct kvm_irq_ack_notifier *kian);
};
60262c476c7SBen-Ami Yassour 
/*
 * Kernel-side state for a host PCI device assigned (passed through)
 * to a guest.  Linked into a per-VM list; see the assigned-device
 * ioctls declared further down in this header.
 */
struct kvm_assigned_dev_kernel {
	struct kvm_irq_ack_notifier ack_notifier;
	struct list_head list;
	int assigned_dev_id;
	int host_segnr;			/* host PCI segment (domain) */
	int host_busnr;			/* host PCI bus */
	int host_devfn;			/* host PCI devfn */
	unsigned int entries_nr;	/* number of MSI-X entries in use */
	int host_irq;
	bool host_irq_disabled;
	bool pci_2_3;			/* PCI 2.3 INTx masking available */
	struct msix_entry *host_msix_entries;
	int guest_irq;
	struct msix_entry *guest_msix_entries;
	unsigned long irq_requested_type;
	int irq_source_id;
	int flags;
	struct pci_dev *dev;
	struct kvm *kvm;
	spinlock_t intx_lock;		/* protects INTx handling */
	spinlock_t intx_mask_lock;	/* protects INTx mask state */
	char irq_name[32];
	struct pci_saved_state *pci_saved_state;
};
62775858a84SAvi Kivity 
/*
 * Notifier invoked when interrupt @irq is masked or unmasked;
 * @func receives the new mask state.
 */
struct kvm_irq_mask_notifier {
	void (*func)(struct kvm_irq_mask_notifier *kimn, bool masked);
	int irq;
	struct hlist_node link;
};
63375858a84SAvi Kivity 
63475858a84SAvi Kivity void kvm_register_irq_mask_notifier(struct kvm *kvm, int irq,
63575858a84SAvi Kivity 				    struct kvm_irq_mask_notifier *kimn);
63675858a84SAvi Kivity void kvm_unregister_irq_mask_notifier(struct kvm *kvm, int irq,
63775858a84SAvi Kivity 				      struct kvm_irq_mask_notifier *kimn);
6384a994358SGleb Natapov void kvm_fire_mask_notifiers(struct kvm *kvm, unsigned irqchip, unsigned pin,
6394a994358SGleb Natapov 			     bool mask);
64075858a84SAvi Kivity 
64146e624b9SGleb Natapov #ifdef __KVM_HAVE_IOAPIC
64246e624b9SGleb Natapov void kvm_get_intr_delivery_bitmask(struct kvm_ioapic *ioapic,
64346e624b9SGleb Natapov 				   union kvm_ioapic_redirect_entry *entry,
64446e624b9SGleb Natapov 				   unsigned long *deliver_bitmask);
64546e624b9SGleb Natapov #endif
64646e624b9SGleb Natapov int kvm_set_irq(struct kvm *kvm, int irq_source_id, u32 irq, int level);
647bd2b53b2SMichael S. Tsirkin int kvm_set_msi(struct kvm_kernel_irq_routing_entry *irq_entry, struct kvm *kvm,
648bd2b53b2SMichael S. Tsirkin 		int irq_source_id, int level);
64944882eedSMarcelo Tosatti void kvm_notify_acked_irq(struct kvm *kvm, unsigned irqchip, unsigned pin);
6503de42dc0SXiantao Zhang void kvm_register_irq_ack_notifier(struct kvm *kvm,
6513de42dc0SXiantao Zhang 				   struct kvm_irq_ack_notifier *kian);
652fa40a821SMarcelo Tosatti void kvm_unregister_irq_ack_notifier(struct kvm *kvm,
653fa40a821SMarcelo Tosatti 				   struct kvm_irq_ack_notifier *kian);
6545550af4dSSheng Yang int kvm_request_irq_source_id(struct kvm *kvm);
6555550af4dSSheng Yang void kvm_free_irq_source_id(struct kvm *kvm, int irq_source_id);
65662c476c7SBen-Ami Yassour 
657522c68c4SSheng Yang /* For vcpu->arch.iommu_flags */
658522c68c4SSheng Yang #define KVM_IOMMU_CACHE_COHERENCY	0x1
659522c68c4SSheng Yang 
66019de40a8SJoerg Roedel #ifdef CONFIG_IOMMU_API
6613ad26d81SMarcelo Tosatti int kvm_iommu_map_pages(struct kvm *kvm, struct kvm_memory_slot *slot);
66232f6daadSAlex Williamson void kvm_iommu_unmap_pages(struct kvm *kvm, struct kvm_memory_slot *slot);
663260782bcSWeidong Han int kvm_iommu_map_guest(struct kvm *kvm);
66462c476c7SBen-Ami Yassour int kvm_iommu_unmap_guest(struct kvm *kvm);
665260782bcSWeidong Han int kvm_assign_device(struct kvm *kvm,
666260782bcSWeidong Han 		      struct kvm_assigned_dev_kernel *assigned_dev);
6670a920356SWeidong Han int kvm_deassign_device(struct kvm *kvm,
6680a920356SWeidong Han 			struct kvm_assigned_dev_kernel *assigned_dev);
66919de40a8SJoerg Roedel #else /* CONFIG_IOMMU_API */
/* !CONFIG_IOMMU_API: no-op stubs so callers need not be conditional. */
static inline int kvm_iommu_map_pages(struct kvm *kvm,
				      struct kvm_memory_slot *slot)
{
	return 0;
}

static inline void kvm_iommu_unmap_pages(struct kvm *kvm,
					 struct kvm_memory_slot *slot)
{
}

/* Report "no IOMMU present" rather than silent success. */
static inline int kvm_iommu_map_guest(struct kvm *kvm)
{
	return -ENODEV;
}

static inline int kvm_iommu_unmap_guest(struct kvm *kvm)
{
	return 0;
}

static inline int kvm_assign_device(struct kvm *kvm,
		struct kvm_assigned_dev_kernel *assigned_dev)
{
	return 0;
}

static inline int kvm_deassign_device(struct kvm *kvm,
		struct kvm_assigned_dev_kernel *assigned_dev)
{
	return 0;
}
70219de40a8SJoerg Roedel #endif /* CONFIG_IOMMU_API */
70362c476c7SBen-Ami Yassour 
/*
 * Account the host->guest mode transition for the current task.
 * Must be called with preemption disabled (enforced by the BUG_ON).
 */
static inline void kvm_guest_enter(void)
{
	BUG_ON(preemptible());
	account_system_vtime(current);
	current->flags |= PF_VCPU;
	/* KVM does not hold any references to rcu protected data when it
	 * switches CPU into a guest mode. In fact switching to a guest mode
	 * is very similar to exiting to userspace from rcu point of view. In
	 * addition CPU may stay in a guest mode for quite a long time (up to
	 * one time slice). Lets treat guest mode as quiescent state, just like
	 * we do with user-mode execution.
	 */
	rcu_virt_note_context_switch(smp_processor_id());
}
718edf88417SAvi Kivity 
/*
 * Account the guest->host mode transition; pairs with kvm_guest_enter().
 * vtime is accounted before PF_VCPU is cleared so the guest interval is
 * charged as guest time.
 */
static inline void kvm_guest_exit(void)
{
	account_system_vtime(current);
	current->flags &= ~PF_VCPU;
}
724edf88417SAvi Kivity 
7259d4cba7fSPaul Mackerras /*
7269d4cba7fSPaul Mackerras  * search_memslots() and __gfn_to_memslot() are here because they are
7279d4cba7fSPaul Mackerras  * used in non-modular code in arch/powerpc/kvm/book3s_hv_rm_mmu.c.
7289d4cba7fSPaul Mackerras  * gfn_to_memslot() itself isn't here as an inline because that would
7299d4cba7fSPaul Mackerras  * bloat other code too much.
7309d4cba7fSPaul Mackerras  */
7319d4cba7fSPaul Mackerras static inline struct kvm_memory_slot *
7329d4cba7fSPaul Mackerras search_memslots(struct kvm_memslots *slots, gfn_t gfn)
7339d4cba7fSPaul Mackerras {
7349d4cba7fSPaul Mackerras 	struct kvm_memory_slot *memslot;
7359d4cba7fSPaul Mackerras 
7369d4cba7fSPaul Mackerras 	kvm_for_each_memslot(memslot, slots)
7379d4cba7fSPaul Mackerras 		if (gfn >= memslot->base_gfn &&
7389d4cba7fSPaul Mackerras 		      gfn < memslot->base_gfn + memslot->npages)
7399d4cba7fSPaul Mackerras 			return memslot;
7409d4cba7fSPaul Mackerras 
7419d4cba7fSPaul Mackerras 	return NULL;
7429d4cba7fSPaul Mackerras }
7439d4cba7fSPaul Mackerras 
/*
 * Inline gfn->memslot lookup; see the comment above search_memslots()
 * for why this lives in the header.
 */
static inline struct kvm_memory_slot *
__gfn_to_memslot(struct kvm_memslots *slots, gfn_t gfn)
{
	return search_memslots(slots, gfn);
}
7499d4cba7fSPaul Mackerras 
/*
 * Return the id of the memslot containing @gfn.
 * NOTE(review): gfn_to_memslot() can return NULL for an unbacked gfn;
 * callers are expected to pass only gfns known to be covered by a slot.
 */
static inline int memslot_id(struct kvm *kvm, gfn_t gfn)
{
	return gfn_to_memslot(kvm, gfn)->id;
}
7540ee8dcb8SXiao Guangrong 
755fb03cb6fSTakuya Yoshikawa static inline gfn_t gfn_to_index(gfn_t gfn, gfn_t base_gfn, int level)
756fb03cb6fSTakuya Yoshikawa {
757fb03cb6fSTakuya Yoshikawa 	/* KVM_HPAGE_GFN_SHIFT(PT_PAGE_TABLE_LEVEL) must be 0. */
758fb03cb6fSTakuya Yoshikawa 	return (gfn >> KVM_HPAGE_GFN_SHIFT(level)) -
759fb03cb6fSTakuya Yoshikawa 		(base_gfn >> KVM_HPAGE_GFN_SHIFT(level));
760fb03cb6fSTakuya Yoshikawa }
761fb03cb6fSTakuya Yoshikawa 
762d19a748bSTakuya Yoshikawa static inline gfn_t
763d19a748bSTakuya Yoshikawa hva_to_gfn_memslot(unsigned long hva, struct kvm_memory_slot *slot)
764d19a748bSTakuya Yoshikawa {
765d19a748bSTakuya Yoshikawa 	gfn_t gfn_offset = (hva - slot->userspace_addr) >> PAGE_SHIFT;
766d19a748bSTakuya Yoshikawa 
767d19a748bSTakuya Yoshikawa 	return slot->base_gfn + gfn_offset;
768d19a748bSTakuya Yoshikawa }
769d19a748bSTakuya Yoshikawa 
770887c08acSXiao Guangrong static inline unsigned long gfn_to_hva_memslot(struct kvm_memory_slot *slot,
771887c08acSXiao Guangrong 					       gfn_t gfn)
772887c08acSXiao Guangrong {
773887c08acSXiao Guangrong 	return slot->userspace_addr + (gfn - slot->base_gfn) * PAGE_SIZE;
774887c08acSXiao Guangrong }
775887c08acSXiao Guangrong 
/* Guest frame number -> guest physical address. */
static inline gpa_t gfn_to_gpa(gfn_t gfn)
{
	return (gpa_t)gfn << PAGE_SHIFT;
}
780edf88417SAvi Kivity 
/* Guest physical address -> guest frame number (page offset dropped). */
static inline gfn_t gpa_to_gfn(gpa_t gpa)
{
	return (gfn_t)(gpa >> PAGE_SHIFT);
}
785c30a358dSJoerg Roedel 
/* Host page frame number -> host physical address. */
static inline hpa_t pfn_to_hpa(pfn_t pfn)
{
	return (hpa_t)pfn << PAGE_SHIFT;
}
79062c476c7SBen-Ami Yassour 
/*
 * Flag the vcpu with KVM_REQ_MIGRATE_TIMER.  Open-codes the request
 * set because kvm_make_request() is defined later in this header.
 */
static inline void kvm_migrate_timers(struct kvm_vcpu *vcpu)
{
	set_bit(KVM_REQ_MIGRATE_TIMER, &vcpu->requests);
}
7952f52d58cSAvi Kivity 
/* Whether a debugfs stat is accumulated per-VM or per-vcpu. */
enum kvm_stat_kind {
	KVM_STAT_VM,
	KVM_STAT_VCPU,
};

/* One entry in the debugfs_entries[] table of exported statistics. */
struct kvm_stats_debugfs_item {
	const char *name;		/* debugfs file name */
	int offset;			/* offset of the counter in the stat struct */
	enum kvm_stat_kind kind;
	struct dentry *dentry;
};
807edf88417SAvi Kivity extern struct kvm_stats_debugfs_item debugfs_entries[];
80876f7c879SHollis Blanchard extern struct dentry *kvm_debugfs_dir;
809d4c9ff2dSFeng(Eric) Liu 
81036c1ed82SMarc Zyngier #if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER)
/*
 * Returns non-zero if an mmu notifier invalidation is in progress
 * (mmu_notifier_count != 0) or has completed since @mmu_seq was
 * sampled (sequence mismatch); the caller must then retry the fault.
 */
static inline int mmu_notifier_retry(struct kvm_vcpu *vcpu, unsigned long mmu_seq)
{
	if (unlikely(vcpu->kvm->mmu_notifier_count))
		return 1;
	/*
	 * Ensure the read of mmu_notifier_count happens before the read
	 * of mmu_notifier_seq.  This interacts with the smp_wmb() in
	 * mmu_notifier_invalidate_range_end to make sure that the caller
	 * either sees the old (non-zero) value of mmu_notifier_count or
	 * the new (incremented) value of mmu_notifier_seq.
	 * PowerPC Book3s HV KVM calls this under a per-page lock
	 * rather than under kvm->mmu_lock, for scalability, so
	 * can't rely on kvm->mmu_lock to keep things ordered.
	 */
	smp_rmb();
	if (vcpu->kvm->mmu_notifier_seq != mmu_seq)
		return 1;
	return 0;
}
830e930bffeSAndrea Arcangeli #endif
831e930bffeSAndrea Arcangeli 
8329900b4b4SMarc Zyngier #ifdef KVM_CAP_IRQ_ROUTING
833399ec807SAvi Kivity 
834399ec807SAvi Kivity #define KVM_MAX_IRQ_ROUTES 1024
835399ec807SAvi Kivity 
836399ec807SAvi Kivity int kvm_setup_default_irq_routing(struct kvm *kvm);
837399ec807SAvi Kivity int kvm_set_irq_routing(struct kvm *kvm,
838399ec807SAvi Kivity 			const struct kvm_irq_routing_entry *entries,
839399ec807SAvi Kivity 			unsigned nr,
840399ec807SAvi Kivity 			unsigned flags);
841399ec807SAvi Kivity void kvm_free_irq_routing(struct kvm *kvm);
842399ec807SAvi Kivity 
84307975ad3SJan Kiszka int kvm_send_userspace_msi(struct kvm *kvm, struct kvm_msi *msi);
84407975ad3SJan Kiszka 
845399ec807SAvi Kivity #else
846399ec807SAvi Kivity 
/* No irq routing support configured: nothing to free. */
static inline void kvm_free_irq_routing(struct kvm *kvm) {}
848399ec807SAvi Kivity 
849399ec807SAvi Kivity #endif
850399ec807SAvi Kivity 
851721eecbfSGregory Haskins #ifdef CONFIG_HAVE_KVM_EVENTFD
852721eecbfSGregory Haskins 
853d34e6b17SGregory Haskins void kvm_eventfd_init(struct kvm *kvm);
854d4db2935SAlex Williamson int kvm_irqfd(struct kvm *kvm, struct kvm_irqfd *args);
855721eecbfSGregory Haskins void kvm_irqfd_release(struct kvm *kvm);
856bd2b53b2SMichael S. Tsirkin void kvm_irq_routing_update(struct kvm *, struct kvm_irq_routing_table *);
857d34e6b17SGregory Haskins int kvm_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args);
858721eecbfSGregory Haskins 
859721eecbfSGregory Haskins #else
860721eecbfSGregory Haskins 
/* !CONFIG_HAVE_KVM_EVENTFD: stubs that fail or do nothing. */
static inline void kvm_eventfd_init(struct kvm *kvm) {}

static inline int kvm_irqfd(struct kvm *kvm, struct kvm_irqfd *args)
{
	return -EINVAL;
}

static inline void kvm_irqfd_release(struct kvm *kvm) {}

#ifdef CONFIG_HAVE_KVM_IRQCHIP
/* Publish the new routing table; readers use RCU dereference. */
static inline void kvm_irq_routing_update(struct kvm *kvm,
					  struct kvm_irq_routing_table *irq_rt)
{
	rcu_assign_pointer(kvm->irq_routing, irq_rt);
}
#endif

static inline int kvm_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args)
{
	return -ENOSYS;
}
882721eecbfSGregory Haskins 
883721eecbfSGregory Haskins #endif /* CONFIG_HAVE_KVM_EVENTFD */
884721eecbfSGregory Haskins 
88573880c80SGleb Natapov #ifdef CONFIG_KVM_APIC_ARCHITECTURE
/* True if @vcpu is the VM's bootstrap processor (BSP). */
static inline bool kvm_vcpu_is_bsp(struct kvm_vcpu *vcpu)
{
	return vcpu->kvm->bsp_vcpu_id == vcpu->vcpu_id;
}
8903e515705SAvi Kivity 
8913e515705SAvi Kivity bool kvm_vcpu_compatible(struct kvm_vcpu *vcpu);
8923e515705SAvi Kivity 
8933e515705SAvi Kivity #else
8943e515705SAvi Kivity 
/* No APIC architecture constraints: every vcpu is compatible. */
static inline bool kvm_vcpu_compatible(struct kvm_vcpu *vcpu) { return true; }
8963e515705SAvi Kivity 
897edf88417SAvi Kivity #endif
898bfd99ff5SAvi Kivity 
899bfd99ff5SAvi Kivity #ifdef __KVM_HAVE_DEVICE_ASSIGNMENT
900bfd99ff5SAvi Kivity 
901bfd99ff5SAvi Kivity long kvm_vm_ioctl_assigned_device(struct kvm *kvm, unsigned ioctl,
902bfd99ff5SAvi Kivity 				  unsigned long arg);
903bfd99ff5SAvi Kivity 
904bfd99ff5SAvi Kivity #else
905bfd99ff5SAvi Kivity 
/* Device assignment not compiled in: reject the ioctl family. */
static inline long kvm_vm_ioctl_assigned_device(struct kvm *kvm, unsigned ioctl,
						unsigned long arg)
{
	return -ENOTTY;
}
911bfd99ff5SAvi Kivity 
91273880c80SGleb Natapov #endif
913bfd99ff5SAvi Kivity 
/* Atomically raise request @req on @vcpu; consumed by kvm_check_request(). */
static inline void kvm_make_request(int req, struct kvm_vcpu *vcpu)
{
	set_bit(req, &vcpu->requests);
}
918a8eeb04aSAvi Kivity 
919a8eeb04aSAvi Kivity static inline bool kvm_check_request(int req, struct kvm_vcpu *vcpu)
920a8eeb04aSAvi Kivity {
9210719837cSAvi Kivity 	if (test_bit(req, &vcpu->requests)) {
9220719837cSAvi Kivity 		clear_bit(req, &vcpu->requests);
9230719837cSAvi Kivity 		return true;
9240719837cSAvi Kivity 	} else {
9250719837cSAvi Kivity 		return false;
9260719837cSAvi Kivity 	}
927a8eeb04aSAvi Kivity }
928a8eeb04aSAvi Kivity 
9294c088493SRaghavendra K T #ifdef CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT
9304c088493SRaghavendra K T 
/* Record that the vcpu entered/left a pause-loop spin. */
static inline void kvm_vcpu_set_in_spin_loop(struct kvm_vcpu *vcpu, bool val)
{
	vcpu->spin_loop.in_spin_loop = val;
}
/* Mark whether the vcpu is eligible for a directed yield. */
static inline void kvm_vcpu_set_dy_eligible(struct kvm_vcpu *vcpu, bool val)
{
	vcpu->spin_loop.dy_eligible = val;
}
9394c088493SRaghavendra K T 
9404c088493SRaghavendra K T #else /* !CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT */
9414c088493SRaghavendra K T 
/* !CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT: no spin-loop tracking. */
static inline void kvm_vcpu_set_in_spin_loop(struct kvm_vcpu *vcpu, bool val)
{
}

static inline void kvm_vcpu_set_dy_eligible(struct kvm_vcpu *vcpu, bool val)
{
}

/* Without tracking, treat every vcpu as a valid yield target. */
static inline bool kvm_vcpu_eligible_for_directed_yield(struct kvm_vcpu *vcpu)
{
	return true;
}
95406e48c51SRaghavendra K T 
9554c088493SRaghavendra K T #endif /* CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT */
956bfd99ff5SAvi Kivity #endif
957bfd99ff5SAvi Kivity 
958