xref: /openbmc/linux/arch/x86/kvm/mmu/page_track.c (revision f97769fd)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Support KVM guest page tracking
 *
 * This feature allows us to track page access in the guest. Currently, only
 * write access is tracked.
 *
 * Copyright(C) 2015 Intel Corporation.
 *
 * Author:
 *   Xiao Guangrong <guangrong.xiao@linux.intel.com>
 */

#include <linux/kvm_host.h>
#include <linux/rculist.h>

#include <asm/kvm_page_track.h>

#include "mmu_internal.h"

void kvm_page_track_free_memslot(struct kvm_memory_slot *slot)
{
	int i;

	for (i = 0; i < KVM_PAGE_TRACK_MAX; i++) {
		kvfree(slot->arch.gfn_track[i]);
		slot->arch.gfn_track[i] = NULL;
	}
}

int kvm_page_track_create_memslot(struct kvm_memory_slot *slot,
				  unsigned long npages)
{
	int i;

	for (i = 0; i < KVM_PAGE_TRACK_MAX; i++) {
		slot->arch.gfn_track[i] =
			kvcalloc(npages, sizeof(*slot->arch.gfn_track[i]),
				 GFP_KERNEL_ACCOUNT);
		if (!slot->arch.gfn_track[i])
			goto track_free;
	}

	return 0;

track_free:
	kvm_page_track_free_memslot(slot);
	return -ENOMEM;
}
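
/*
 * Illustrative note, added here and not part of the original file: each
 * memslot keeps one unsigned short counter array per tracking mode, indexed
 * by the gfn's 4K-page offset into the slot. A hypothetical direct lookup:
 *
 *	int index = gfn_to_index(gfn, slot->base_gfn, PG_LEVEL_4K);
 *	unsigned short cnt = slot->arch.gfn_track[KVM_PAGE_TRACK_WRITE][index];
 *
 * A non-zero count means at least one tracker currently write-tracks the gfn.
 */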

static inline bool page_track_mode_is_valid(enum kvm_page_track_mode mode)
{
	if (mode < 0 || mode >= KVM_PAGE_TRACK_MAX)
		return false;

	return true;
}

static void update_gfn_track(struct kvm_memory_slot *slot, gfn_t gfn,
			     enum kvm_page_track_mode mode, short count)
{
	int index, val;

	index = gfn_to_index(gfn, slot->base_gfn, PG_LEVEL_4K);

	val = slot->arch.gfn_track[mode][index];
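
	/*
	 * Added note (not in the original): the per-gfn counter is an
	 * unsigned short, so refuse updates that would underflow or
	 * overflow it.
	 */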
	if (WARN_ON(val + count < 0 || val + count > USHRT_MAX))
		return;

	slot->arch.gfn_track[mode][index] += count;
}

/*
 * Add the guest page to the tracking pool so that corresponding access on
 * that page will be intercepted.
 *
 * It should be called with mmu-lock held, and with either kvm->srcu or
 * kvm->slots_lock held.
 *
 * @kvm: the guest instance we are interested in.
 * @slot: the memory slot that @gfn belongs to.
 * @gfn: the guest page.
 * @mode: tracking mode, currently only write track is supported.
 */
void kvm_slot_page_track_add_page(struct kvm *kvm,
				  struct kvm_memory_slot *slot, gfn_t gfn,
				  enum kvm_page_track_mode mode)
{
	if (WARN_ON(!page_track_mode_is_valid(mode)))
		return;

	update_gfn_track(slot, gfn, mode, 1);

	/*
	 * A new track stops large-page mapping for the
	 * tracked page.
	 */
	kvm_mmu_gfn_disallow_lpage(slot, gfn);

	if (mode == KVM_PAGE_TRACK_WRITE)
		if (kvm_mmu_slot_gfn_write_protect(kvm, slot, gfn))
			kvm_flush_remote_tlbs(kvm);
}
EXPORT_SYMBOL_GPL(kvm_slot_page_track_add_page);
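
/*
 * A minimal usage sketch, not part of the original file: write-track one
 * guest page. The function name "demo_write_track_gfn" is made up; the
 * caller is assumed to already hold kvm->srcu or kvm->slots_lock, and
 * mmu_lock is taken here because it is a spinlock at this revision.
 */
static void __maybe_unused demo_write_track_gfn(struct kvm *kvm,
						struct kvm_memory_slot *slot,
						gfn_t gfn)
{
	spin_lock(&kvm->mmu_lock);
	kvm_slot_page_track_add_page(kvm, slot, gfn, KVM_PAGE_TRACK_WRITE);
	spin_unlock(&kvm->mmu_lock);
}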

/*
 * Remove the guest page from the tracking pool, which stops the
 * interception of corresponding access on that page. It is the opposite
 * operation of kvm_slot_page_track_add_page().
 *
 * It should be called with mmu-lock held, and with either kvm->srcu or
 * kvm->slots_lock held.
 *
 * @kvm: the guest instance we are interested in.
 * @slot: the memory slot that @gfn belongs to.
 * @gfn: the guest page.
 * @mode: tracking mode, currently only write track is supported.
 */
void kvm_slot_page_track_remove_page(struct kvm *kvm,
				     struct kvm_memory_slot *slot, gfn_t gfn,
				     enum kvm_page_track_mode mode)
{
	if (WARN_ON(!page_track_mode_is_valid(mode)))
		return;

	update_gfn_track(slot, gfn, mode, -1);

	/*
	 * Allow large-page mapping for the tracked page
	 * again after the last tracker is gone.
	 */
	kvm_mmu_gfn_allow_lpage(slot, gfn);
}
EXPORT_SYMBOL_GPL(kvm_slot_page_track_remove_page);

/*
 * Check whether the corresponding access on the specified guest page is
 * tracked.
 */
bool kvm_page_track_is_active(struct kvm_vcpu *vcpu, gfn_t gfn,
			      enum kvm_page_track_mode mode)
{
	struct kvm_memory_slot *slot;
	int index;

	if (WARN_ON(!page_track_mode_is_valid(mode)))
		return false;

	slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
	if (!slot)
		return false;

	index = gfn_to_index(gfn, slot->base_gfn, PG_LEVEL_4K);
	return !!READ_ONCE(slot->arch.gfn_track[mode][index]);
}
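
/*
 * Hedged example (not in the original file): a fault handler could consult
 * the tracker before installing a writable mapping, e.g.:
 *
 *	if (kvm_page_track_is_active(vcpu, gfn, KVM_PAGE_TRACK_WRITE))
 *		return true;	// keep write-protected, emulate the write
 */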

void kvm_page_track_cleanup(struct kvm *kvm)
{
	struct kvm_page_track_notifier_head *head;

	head = &kvm->arch.track_notifier_head;
	cleanup_srcu_struct(&head->track_srcu);
}

void kvm_page_track_init(struct kvm *kvm)
{
	struct kvm_page_track_notifier_head *head;

	head = &kvm->arch.track_notifier_head;
	init_srcu_struct(&head->track_srcu);
	INIT_HLIST_HEAD(&head->track_notifier_list);
}

/*
 * Register the notifier so that the events intercepted on the tracked
 * guest pages can be received.
 */
void
kvm_page_track_register_notifier(struct kvm *kvm,
				 struct kvm_page_track_notifier_node *n)
{
	struct kvm_page_track_notifier_head *head;

	head = &kvm->arch.track_notifier_head;

	spin_lock(&kvm->mmu_lock);
	hlist_add_head_rcu(&n->node, &head->track_notifier_list);
	spin_unlock(&kvm->mmu_lock);
}
EXPORT_SYMBOL_GPL(kvm_page_track_register_notifier);
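
/*
 * Purely illustrative sketch, not part of the original file: how a user
 * such as a GPU page-table shadowing implementation might hook guest
 * writes. The node and function names here are hypothetical.
 */
static void demo_track_write(struct kvm_vcpu *vcpu, gpa_t gpa, const u8 *new,
			     int bytes,
			     struct kvm_page_track_notifier_node *node)
{
	/* React to the intercepted guest write, e.g. re-shadow the page. */
}

static struct kvm_page_track_notifier_node demo_node = {
	.track_write = demo_track_write,
};

static void __maybe_unused demo_register_notifier(struct kvm *kvm)
{
	kvm_page_track_register_notifier(kvm, &demo_node);
}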

/*
 * Stop receiving the intercepted events. It is the opposite operation of
 * kvm_page_track_register_notifier().
 */
void
kvm_page_track_unregister_notifier(struct kvm *kvm,
				   struct kvm_page_track_notifier_node *n)
{
	struct kvm_page_track_notifier_head *head;

	head = &kvm->arch.track_notifier_head;

	spin_lock(&kvm->mmu_lock);
	hlist_del_rcu(&n->node);
	spin_unlock(&kvm->mmu_lock);
	synchronize_srcu(&head->track_srcu);
}
EXPORT_SYMBOL_GPL(kvm_page_track_unregister_notifier);

/*
 * Notify the registered nodes that a write access has been intercepted and
 * that write emulation has finished at this point.
 *
 * Each node must figure out by itself whether the written page is one it
 * is interested in.
 */
void kvm_page_track_write(struct kvm_vcpu *vcpu, gpa_t gpa, const u8 *new,
			  int bytes)
{
	struct kvm_page_track_notifier_head *head;
	struct kvm_page_track_notifier_node *n;
	int idx;

	head = &vcpu->kvm->arch.track_notifier_head;

	if (hlist_empty(&head->track_notifier_list))
		return;

	idx = srcu_read_lock(&head->track_srcu);
	hlist_for_each_entry_rcu(n, &head->track_notifier_list, node)
		if (n->track_write)
			n->track_write(vcpu, gpa, new, bytes, n);
	srcu_read_unlock(&head->track_srcu, idx);
}

/*
 * Notify the registered nodes that a memory slot is being removed or moved
 * so that they can drop write protection for the pages in that memory slot.
 *
 * Each node must figure out by itself whether it has any write-protected
 * pages in this slot.
 */
void kvm_page_track_flush_slot(struct kvm *kvm, struct kvm_memory_slot *slot)
{
	struct kvm_page_track_notifier_head *head;
	struct kvm_page_track_notifier_node *n;
	int idx;

	head = &kvm->arch.track_notifier_head;

	if (hlist_empty(&head->track_notifier_list))
		return;

	idx = srcu_read_lock(&head->track_srcu);
	hlist_for_each_entry_rcu(n, &head->track_notifier_list, node)
		if (n->track_flush_slot)
			n->track_flush_slot(kvm, slot, n);
	srcu_read_unlock(&head->track_srcu, idx);
}