// SPDX-License-Identifier: GPL-2.0-only
/*
 * KVM dirty ring implementation
 *
 * Copyright 2019 Red Hat, Inc.
 */
#include <linux/kvm_host.h>
#include <linux/kvm.h>
#include <linux/vmalloc.h>
#include <linux/kvm_dirty_ring.h>
#include <trace/events/kvm.h>
#include "kvm_mm.h"

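/*
 * Size, in entries, of the per-vCPU hardware dirty log buffer.  The weak
 * default is 0; architectures that maintain such a buffer (e.g. x86 with
 * PML) override this.
 */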
int __weak kvm_cpu_dirty_log_size(void)
{
	return 0;
}

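/*
 * Entries reserved above the soft-full limit, so that entries still pending
 * in the CPU's hardware dirty log can be pushed even after the ring is
 * already considered soft full.
 */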
u32 kvm_dirty_ring_get_rsvd_entries(void)
{
	return KVM_DIRTY_RING_RSVD_ENTRIES + kvm_cpu_dirty_log_size();
}

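/*
 * Returns true if the dirty bitmap is in use: either the dirty ring is
 * disabled, or it was enabled together with the bitmap
 * (KVM_CAP_DIRTY_LOG_RING_WITH_BITMAP).  Must be called with
 * kvm->slots_lock held.
 */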
bool kvm_use_dirty_bitmap(struct kvm *kvm)
{
	lockdep_assert_held(&kvm->slots_lock);

	return !kvm->dirty_ring_size || kvm->dirty_ring_with_bitmap;
}

#ifndef CONFIG_NEED_KVM_DIRTY_RING_WITH_BITMAP
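/*
 * Default: dirtying memory without a running vCPU is not allowed.
 * Architectures that need to log writes outside of vCPU context (and
 * therefore select CONFIG_NEED_KVM_DIRTY_RING_WITH_BITMAP) provide their
 * own implementation.
 */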
bool kvm_arch_allow_write_without_running_vcpu(struct kvm *kvm)
{
	return false;
}
#endif

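/* Number of entries published to the ring but not yet reset by userspace. */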
static u32 kvm_dirty_ring_used(struct kvm_dirty_ring *ring)
{
	return READ_ONCE(ring->dirty_index) - READ_ONCE(ring->reset_index);
}

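/*
 * The ring is "soft full" once the soft limit is reached; the vCPU should
 * exit to userspace so the ring can be harvested and reset.
 */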
static bool kvm_dirty_ring_soft_full(struct kvm_dirty_ring *ring)
{
	return kvm_dirty_ring_used(ring) >= ring->soft_limit;
}

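/*
 * The ring is completely full; pushing another entry would overwrite one
 * that userspace has not harvested yet.
 */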
static bool kvm_dirty_ring_full(struct kvm_dirty_ring *ring)
{
	return kvm_dirty_ring_used(ring) >= ring->size;
}

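/*
 * Clear the dirty state (re-enabling dirty-logging protection) for the GFNs
 * set in @mask, starting at @offset within the memslot.  The address space
 * id is carried in the upper 16 bits of @slot, the slot id in the lower 16
 * bits.
 */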
static void kvm_reset_dirty_gfn(struct kvm *kvm, u32 slot, u64 offset, u64 mask)
{
	struct kvm_memory_slot *memslot;
	int as_id, id;

	as_id = slot >> 16;
	id = (u16)slot;

	if (as_id >= KVM_ADDRESS_SPACE_NUM || id >= KVM_USER_MEM_SLOTS)
		return;

	memslot = id_to_memslot(__kvm_memslots(kvm, as_id), id);

	if (!memslot || (offset + __fls(mask)) >= memslot->npages)
		return;

	KVM_MMU_LOCK(kvm);
	kvm_arch_mmu_enable_log_dirty_pt_masked(kvm, memslot, offset, mask);
	KVM_MMU_UNLOCK(kvm);
}

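/*
 * Allocate a dirty ring of @size bytes.  The resulting number of entries is
 * expected to be a power of two, since the ring indices are wrapped with
 * "& (ring->size - 1)".
 */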
int kvm_dirty_ring_alloc(struct kvm_dirty_ring *ring, int index, u32 size)
{
	ring->dirty_gfns = vzalloc(size);
	if (!ring->dirty_gfns)
		return -ENOMEM;

	ring->size = size / sizeof(struct kvm_dirty_gfn);
	ring->soft_limit = ring->size - kvm_dirty_ring_get_rsvd_entries();
	ring->dirty_index = 0;
	ring->reset_index = 0;
	ring->index = index;

	return 0;
}

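/*
 * A dirty GFN entry cycles through three states, shared with userspace via
 * the flags field:
 *
 *   invalid   -> dirty     (KVM sets KVM_DIRTY_GFN_F_DIRTY when pushing)
 *   dirty     -> harvested (userspace sets KVM_DIRTY_GFN_F_RESET)
 *   harvested -> invalid   (KVM clears the flags on KVM_RESET_DIRTY_RINGS)
 *
 * The release/acquire pair below orders the flag accesses against the
 * slot/offset fields written or read on the other side.
 */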
static inline void kvm_dirty_gfn_set_invalid(struct kvm_dirty_gfn *gfn)
{
	smp_store_release(&gfn->flags, 0);
}

static inline void kvm_dirty_gfn_set_dirtied(struct kvm_dirty_gfn *gfn)
{
	gfn->flags = KVM_DIRTY_GFN_F_DIRTY;
}

static inline bool kvm_dirty_gfn_harvested(struct kvm_dirty_gfn *gfn)
{
	return smp_load_acquire(&gfn->flags) & KVM_DIRTY_GFN_F_RESET;
}

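/*
 * Reset the entries that userspace has marked as harvested, starting at
 * reset_index and stopping at the first entry that has not been harvested.
 * Adjacent offsets within the same memslot are coalesced into a single mask
 * before the dirty state is cleared.  Called with kvm->slots_lock held
 * (KVM_RESET_DIRTY_RINGS); returns the number of entries reset.
 */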
int kvm_dirty_ring_reset(struct kvm *kvm, struct kvm_dirty_ring *ring)
{
	u32 cur_slot, next_slot;
	u64 cur_offset, next_offset;
	unsigned long mask;
	int count = 0;
	struct kvm_dirty_gfn *entry;
	bool first_round = true;

	/* This is only needed to make compilers happy */
	cur_slot = cur_offset = mask = 0;

	while (true) {
		entry = &ring->dirty_gfns[ring->reset_index & (ring->size - 1)];

		if (!kvm_dirty_gfn_harvested(entry))
			break;

		next_slot = READ_ONCE(entry->slot);
		next_offset = READ_ONCE(entry->offset);

		/* Update the flags to reflect that this GFN is reset */
		kvm_dirty_gfn_set_invalid(entry);

		ring->reset_index++;
		count++;
		/*
		 * Try to coalesce the reset operations when the guest is
		 * scanning pages in the same slot.
		 */
		if (!first_round && next_slot == cur_slot) {
			s64 delta = next_offset - cur_offset;

			if (delta >= 0 && delta < BITS_PER_LONG) {
				mask |= 1ull << delta;
				continue;
			}

			/* Backwards visit, careful about overflows! */
			if (delta > -BITS_PER_LONG && delta < 0 &&
			    (mask << -delta >> -delta) == mask) {
				cur_offset = next_offset;
				mask = (mask << -delta) | 1;
				continue;
			}
		}
		kvm_reset_dirty_gfn(kvm, cur_slot, cur_offset, mask);
		cur_slot = next_slot;
		cur_offset = next_offset;
		mask = 1;
		first_round = false;
	}

	kvm_reset_dirty_gfn(kvm, cur_slot, cur_offset, mask);

	/*
	 * The KVM_REQ_DIRTY_RING_SOFT_FULL request will be cleared by the
	 * vCPU thread the next time it enters the guest.
	 */

	trace_kvm_dirty_ring_reset(ring);

	return count;
}

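/*
 * Publish a dirty GFN into @vcpu's dirty ring.  Called from vCPU context
 * when a page is dirtied; the ring should never be hard full here because
 * the vCPU is forced out to userspace once the soft limit is reached.
 */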
void kvm_dirty_ring_push(struct kvm_vcpu *vcpu, u32 slot, u64 offset)
{
	struct kvm_dirty_ring *ring = &vcpu->dirty_ring;
	struct kvm_dirty_gfn *entry;

	/* It should never get full */
	WARN_ON_ONCE(kvm_dirty_ring_full(ring));

	entry = &ring->dirty_gfns[ring->dirty_index & (ring->size - 1)];

	entry->slot = slot;
	entry->offset = offset;
	/*
	 * Make sure the data is filled in before we publish this to
	 * the userspace program.  There's no paired kernel-side reader.
	 */
	smp_wmb();
	kvm_dirty_gfn_set_dirtied(entry);
	ring->dirty_index++;
	trace_kvm_dirty_ring_push(ring, slot, offset);

	if (kvm_dirty_ring_soft_full(ring))
		kvm_make_request(KVM_REQ_DIRTY_RING_SOFT_FULL, vcpu);
}

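/*
 * Check whether the vCPU has to exit to userspace because its dirty ring is
 * (still) soft full.  Returns true if an exit with
 * KVM_EXIT_DIRTY_RING_FULL has been prepared.
 */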
bool kvm_dirty_ring_check_request(struct kvm_vcpu *vcpu)
{
	/*
	 * The VCPU isn't runnable when the dirty ring becomes soft full.
	 * The KVM_REQ_DIRTY_RING_SOFT_FULL event is always set to prevent
	 * the VCPU from running until the dirty pages are harvested and
	 * the dirty ring is reset by userspace.
	 */
	if (kvm_check_request(KVM_REQ_DIRTY_RING_SOFT_FULL, vcpu) &&
	    kvm_dirty_ring_soft_full(&vcpu->dirty_ring)) {
		kvm_make_request(KVM_REQ_DIRTY_RING_SOFT_FULL, vcpu);
		vcpu->run->exit_reason = KVM_EXIT_DIRTY_RING_FULL;
		trace_kvm_dirty_ring_exit(vcpu);
		return true;
	}

	return false;
}

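/*
 * Return the page at index @offset (in pages) within the ring's vmalloc'ed
 * backing area, used when mapping the ring into userspace.
 */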
struct page *kvm_dirty_ring_get_page(struct kvm_dirty_ring *ring, u32 offset)
{
	return vmalloc_to_page((void *)ring->dirty_gfns + offset * PAGE_SIZE);
}

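/* Free the ring's backing storage. */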
void kvm_dirty_ring_free(struct kvm_dirty_ring *ring)
{
	vfree(ring->dirty_gfns);
	ring->dirty_gfns = NULL;
}