#ifndef KVM_DIRTY_RING_H
#define KVM_DIRTY_RING_H

#include <linux/kvm.h>

/**
 * kvm_dirty_ring: KVM internal dirty ring structure
 *
 * @dirty_index: free running counter that points to the next slot in
 *               dirty_ring->dirty_gfns, where a new dirty page should go
 * @reset_index: free running counter that points to the next dirty page
 *               in dirty_ring->dirty_gfns for which the dirty trap needs
 *               to be re-enabled
 * @size:        number of entries in the compact list, dirty_ring->dirty_gfns
 * @soft_limit:  when the number of dirty pages in the list reaches this
 *               limit, the vcpu that owns this ring should exit to userspace
 *               to allow userspace to harvest all the dirty pages
 * @dirty_gfns:  the array to keep the dirty gfns
 * @index:       index of this dirty ring
 */
struct kvm_dirty_ring {
	u32 dirty_index;
	u32 reset_index;
	u32 size;
	u32 soft_limit;
	struct kvm_dirty_gfn *dirty_gfns;
	int index;
};
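
/*
 * A minimal illustration of how the fields above relate (editor's sketch,
 * not helpers declared by this header; it assumes @size is a power of two,
 * so the free-running counters wrap implicitly):
 *
 *	// entries pushed but not yet harvested/reset
 *	u32 used = ring->dirty_index - ring->reset_index;
 *
 *	// the slot a new dirty gfn would occupy
 *	struct kvm_dirty_gfn *next =
 *		&ring->dirty_gfns[ring->dirty_index & (ring->size - 1)];
 *
 *	// once this holds, the owning vcpu should exit to userspace
 *	bool soft_full = (used >= ring->soft_limit);
 */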

#ifndef CONFIG_HAVE_KVM_DIRTY_RING
/*
 * If CONFIG_HAVE_KVM_DIRTY_RING is not defined, kvm_dirty_ring.o should
 * not be built either, so define these nop functions for the arch.
 */
static inline u32 kvm_dirty_ring_get_rsvd_entries(void)
{
	return 0;
}

static inline bool kvm_use_dirty_bitmap(struct kvm *kvm)
{
	return true;
}

static inline int kvm_dirty_ring_alloc(struct kvm_dirty_ring *ring,
				       int index, u32 size)
{
	return 0;
}

static inline int kvm_dirty_ring_reset(struct kvm *kvm,
				       struct kvm_dirty_ring *ring)
{
	return 0;
}

static inline void kvm_dirty_ring_push(struct kvm_vcpu *vcpu,
				       u32 slot, u64 offset)
{
}

static inline struct page *kvm_dirty_ring_get_page(struct kvm_dirty_ring *ring,
						   u32 offset)
{
	return NULL;
}

static inline void kvm_dirty_ring_free(struct kvm_dirty_ring *ring)
{
}

#else /* CONFIG_HAVE_KVM_DIRTY_RING */

int kvm_cpu_dirty_log_size(void);
bool kvm_use_dirty_bitmap(struct kvm *kvm);
bool kvm_arch_allow_write_without_running_vcpu(struct kvm *kvm);
u32 kvm_dirty_ring_get_rsvd_entries(void);
int kvm_dirty_ring_alloc(struct kvm_dirty_ring *ring, int index, u32 size);

/*
 * called with kvm->slots_lock held, returns the number of
 * processed pages.
 */
int kvm_dirty_ring_reset(struct kvm *kvm, struct kvm_dirty_ring *ring);
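
/*
 * Hypothetical caller sketch (not code from this header): resetting is
 * assumed to be driven per VM, with kvm->slots_lock taken around the
 * per-ring calls and the returned page counts accumulated by the caller.
 *
 *	int cleared = 0;
 *
 *	mutex_lock(&kvm->slots_lock);
 *	kvm_for_each_vcpu(i, vcpu, kvm)
 *		cleared += kvm_dirty_ring_reset(kvm, &vcpu->dirty_ring);
 *	mutex_unlock(&kvm->slots_lock);
 */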

/*
 * Push a dirty gfn (identified by @slot and @offset) onto the dirty ring
 * owned by @vcpu; the vcpu is expected to exit to userspace for harvesting
 * once the ring's soft limit has been reached.
 */
void kvm_dirty_ring_push(struct kvm_vcpu *vcpu, u32 slot, u64 offset);

bool kvm_dirty_ring_check_request(struct kvm_vcpu *vcpu);
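
/*
 * Sketch of how the two calls above are expected to pair up (surrounding
 * code and variable names are illustrative assumptions, not definitions
 * from this header):
 *
 *	// in the dirty-logging path, with a running vcpu:
 *	kvm_dirty_ring_push(vcpu, slot, rel_gfn);
 *
 *	// in the vcpu run loop, before (re)entering the guest:
 *	if (kvm_dirty_ring_check_request(vcpu))
 *		return 0;	// exit to userspace to harvest the ring
 */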

/* for use in vm_operations_struct */
struct page *kvm_dirty_ring_get_page(struct kvm_dirty_ring *ring, u32 offset);
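
/*
 * Assumed usage from a vm_operations_struct fault handler, so userspace
 * can mmap() the ring pages; the handler below is an illustrative sketch,
 * not a definition from this header (vmf->pgoff is assumed to already be
 * relative to where the ring is mapped):
 *
 *	static vm_fault_t dirty_ring_fault(struct vm_fault *vmf)
 *	{
 *		struct kvm_vcpu *vcpu = vmf->vma->vm_file->private_data;
 *		struct page *page;
 *
 *		page = kvm_dirty_ring_get_page(&vcpu->dirty_ring, vmf->pgoff);
 *		get_page(page);
 *		vmf->page = page;
 *		return 0;
 *	}
 */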

void kvm_dirty_ring_free(struct kvm_dirty_ring *ring);

#endif /* CONFIG_HAVE_KVM_DIRTY_RING */

#endif	/* KVM_DIRTY_RING_H */