// SPDX-License-Identifier: GPL-2.0-only
/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * This module enables kernel and guest-mode vCPU access to guest physical
 * memory with suitable invalidation mechanisms.
 *
 * Copyright © 2021 Amazon.com, Inc. or its affiliates.
 *
 * Authors:
 *   David Woodhouse <dwmw2@infradead.org>
 */

#include <linux/kvm_host.h>
#include <linux/kvm.h>
#include <linux/highmem.h>
#include <linux/module.h>
#include <linux/errno.h>

#include "kvm_mm.h"

/*
 * MMU notifier 'invalidate_range_start' hook.
 */
void gfn_to_pfn_cache_invalidate_start(struct kvm *kvm, unsigned long start,
				       unsigned long end, bool may_block)
{
	DECLARE_BITMAP(vcpu_bitmap, KVM_MAX_VCPUS);
	struct gfn_to_pfn_cache *gpc;
	bool evict_vcpus = false;

	spin_lock(&kvm->gpc_lock);
	list_for_each_entry(gpc, &kvm->gpc_list, list) {
		write_lock_irq(&gpc->lock);

		/* Only a single page so no need to care about length */
		if (gpc->valid && !is_error_noslot_pfn(gpc->pfn) &&
		    gpc->uhva >= start && gpc->uhva < end) {
			gpc->valid = false;

			/*
			 * If a guest vCPU could be using the physical address,
			 * it needs to be forced out of guest mode.
			 */
			if (gpc->usage & KVM_GUEST_USES_PFN) {
				if (!evict_vcpus) {
					evict_vcpus = true;
					bitmap_zero(vcpu_bitmap, KVM_MAX_VCPUS);
				}
				__set_bit(gpc->vcpu->vcpu_idx, vcpu_bitmap);
			}
		}
		write_unlock_irq(&gpc->lock);
	}
	spin_unlock(&kvm->gpc_lock);

	if (evict_vcpus) {
		/*
		 * KVM needs to ensure the vCPU is fully out of guest context
		 * before allowing the invalidation to continue.
		 */
		unsigned int req = KVM_REQ_OUTSIDE_GUEST_MODE;
		bool called;

		/*
		 * If the OOM reaper is active, then all vCPUs should have
		 * been stopped already, so perform the request without
		 * KVM_REQUEST_WAIT and be sad if any needed to be IPI'd.
		 */
		if (!may_block)
			req &= ~KVM_REQUEST_WAIT;

		called = kvm_make_vcpus_request_mask(kvm, req, vcpu_bitmap);

		WARN_ON_ONCE(called && !may_block);
	}
}

bool kvm_gfn_to_pfn_cache_check(struct kvm *kvm, struct gfn_to_pfn_cache *gpc,
				gpa_t gpa, unsigned long len)
{
	struct kvm_memslots *slots = kvm_memslots(kvm);

	if (!gpc->active)
		return false;

	if ((gpa & ~PAGE_MASK) + len > PAGE_SIZE)
		return false;

	if (gpc->gpa != gpa || gpc->generation != slots->generation ||
	    kvm_is_error_hva(gpc->uhva))
		return false;

	if (!gpc->valid)
		return false;

	return true;
}
EXPORT_SYMBOL_GPL(kvm_gfn_to_pfn_cache_check);
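/*
 * Typical read-side usage is a check/refresh loop under gpc->lock held for
 * read.  A minimal sketch, with error handling elided and the IRQ-save
 * locking variant assumed (the exact variant depends on the caller's
 * context):
 *
 *	unsigned long flags;
 *
 *	read_lock_irqsave(&gpc->lock, flags);
 *	while (!kvm_gfn_to_pfn_cache_check(kvm, gpc, gpc->gpa, len)) {
 *		read_unlock_irqrestore(&gpc->lock, flags);
 *		if (kvm_gfn_to_pfn_cache_refresh(kvm, gpc, gpc->gpa, len))
 *			goto err;
 *		read_lock_irqsave(&gpc->lock, flags);
 *	}
 *	... access the page via gpc->khva ...
 *	read_unlock_irqrestore(&gpc->lock, flags);
 */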
static void gpc_unmap_khva(struct kvm *kvm, kvm_pfn_t pfn, void *khva)
{
	/* Unmap the old pfn/page if it was mapped before. */
	if (!is_error_noslot_pfn(pfn) && khva) {
		if (pfn_valid(pfn))
			kunmap(pfn_to_page(pfn));
#ifdef CONFIG_HAS_IOMEM
		else
			memunmap(khva);
#endif
	}
}

static inline bool mmu_notifier_retry_cache(struct kvm *kvm, unsigned long mmu_seq)
{
	/*
	 * mn_active_invalidate_count acts for all intents and purposes
	 * like mmu_invalidate_in_progress here; but the latter cannot
	 * be used here because the invalidation of caches in the
	 * mmu_notifier event occurs _before_ mmu_invalidate_in_progress
	 * is elevated.
	 *
	 * Note, it does not matter that mn_active_invalidate_count
	 * is not protected by gpc->lock.  It is guaranteed to
	 * be elevated before the mmu_notifier acquires gpc->lock, and
	 * isn't dropped until after mmu_invalidate_seq is updated.
	 */
	if (kvm->mn_active_invalidate_count)
		return true;

	/*
	 * Ensure mn_active_invalidate_count is read before
	 * mmu_invalidate_seq.  This pairs with the smp_wmb() in
	 * mmu_notifier_invalidate_range_end() to guarantee either the
	 * old (non-zero) value of mn_active_invalidate_count or the
	 * new (incremented) value of mmu_invalidate_seq is observed.
	 */
	smp_rmb();
	return kvm->mmu_invalidate_seq != mmu_seq;
}

/*
 * Returns 0 on success, or a negative errno if the uHVA could not be
 * translated to a pfn and (if necessary) mapped into the kernel.
 */
static int hva_to_pfn_retry(struct kvm *kvm, struct gfn_to_pfn_cache *gpc)
{
	/* Note, the new page offset may be different than the old! */
	void *old_khva = gpc->khva - offset_in_page(gpc->khva);
	kvm_pfn_t new_pfn = KVM_PFN_ERR_FAULT;
	void *new_khva = NULL;
	unsigned long mmu_seq;

	lockdep_assert_held(&gpc->refresh_lock);

	lockdep_assert_held_write(&gpc->lock);

	/*
	 * Invalidate the cache prior to dropping gpc->lock; the gpa=>uhva
	 * assets have already been updated, so a concurrent check() from a
	 * different task might otherwise not fail the gpa/uhva/generation
	 * checks.
	 */
	gpc->valid = false;

	do {
		mmu_seq = kvm->mmu_invalidate_seq;
		smp_rmb();

		write_unlock_irq(&gpc->lock);

		/*
		 * If the previous iteration "failed" due to an mmu_notifier
		 * event, release the pfn and unmap the kernel virtual address
		 * from the previous attempt.  Unmapping might sleep, so this
		 * needs to be done after dropping the lock.  Opportunistically
		 * check for resched while the lock isn't held.
		 */
		if (new_pfn != KVM_PFN_ERR_FAULT) {
			/*
			 * Keep the mapping if the previous iteration reused
			 * the existing mapping and didn't create a new one.
			 */
			if (new_khva != old_khva)
				gpc_unmap_khva(kvm, new_pfn, new_khva);

			kvm_release_pfn_clean(new_pfn);

			cond_resched();
		}

		/* We always request a writeable mapping */
		new_pfn = hva_to_pfn(gpc->uhva, false, NULL, true, NULL);
		if (is_error_noslot_pfn(new_pfn))
			goto out_error;

		/*
		 * Obtain a new kernel mapping if KVM itself will access the
		 * pfn.  Note, kmap() and memremap() can both sleep, so this
		 * too must be done outside of gpc->lock!
		 */
		if (gpc->usage & KVM_HOST_USES_PFN) {
			if (new_pfn == gpc->pfn) {
				new_khva = old_khva;
			} else if (pfn_valid(new_pfn)) {
				new_khva = kmap(pfn_to_page(new_pfn));
#ifdef CONFIG_HAS_IOMEM
			} else {
				new_khva = memremap(pfn_to_hpa(new_pfn), PAGE_SIZE, MEMREMAP_WB);
#endif
			}
			if (!new_khva) {
				kvm_release_pfn_clean(new_pfn);
				goto out_error;
			}
		}

		write_lock_irq(&gpc->lock);

		/*
		 * Other tasks must wait for _this_ refresh to complete before
		 * attempting to refresh.
		 */
		WARN_ON_ONCE(gpc->valid);
	} while (mmu_notifier_retry_cache(kvm, mmu_seq));

	gpc->valid = true;
	gpc->pfn = new_pfn;
	gpc->khva = new_khva + (gpc->gpa & ~PAGE_MASK);

	/*
	 * Put the reference to the _new_ pfn.  The pfn is now tracked by the
	 * cache and can be safely migrated, swapped, etc... as the cache will
	 * invalidate any mappings in response to relevant mmu_notifier events.
	 */
	kvm_release_pfn_clean(new_pfn);

	return 0;

out_error:
	write_lock_irq(&gpc->lock);

	return -EFAULT;
}
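/*
 * An illustrative interleaving of the race that the mmu_seq snapshot and
 * mmu_notifier_retry_cache() close (a sketch of the ordering, not code).
 * Because hva_to_pfn_retry() marks the cache invalid before dropping
 * gpc->lock, a concurrent invalidation skips the cache entirely, so the
 * refresher must detect the event itself:
 *
 *	refresher				mmu_notifier
 *	---------				------------
 *	gpc->valid = false
 *	mmu_seq = kvm->mmu_invalidate_seq
 *	drop gpc->lock
 *	new_pfn = hva_to_pfn(...)
 *						invalidate_range_start():
 *						  skips the cache (!gpc->valid),
 *						  mn_active_invalidate_count and
 *						  then mmu_invalidate_seq are
 *						  elevated
 *	retake gpc->lock
 *	mmu_notifier_retry_cache() is true,
 *	so loop and redo the lookup
 *
 * Without the retry, the refresher could install a pfn whose mapping was
 * just invalidated, and the mmu_notifier would never know the cache used it.
 */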
int kvm_gfn_to_pfn_cache_refresh(struct kvm *kvm, struct gfn_to_pfn_cache *gpc,
				 gpa_t gpa, unsigned long len)
{
	struct kvm_memslots *slots = kvm_memslots(kvm);
	unsigned long page_offset = gpa & ~PAGE_MASK;
	bool unmap_old = false;
	unsigned long old_uhva;
	kvm_pfn_t old_pfn;
	void *old_khva;
	int ret;

	/*
	 * The request must fit within a single page.  The 'len' argument
	 * exists only to enforce that.
	 */
	if (page_offset + len > PAGE_SIZE)
		return -EINVAL;

	/*
	 * If another task is refreshing the cache, wait for it to complete.
	 * There is no guarantee that concurrent refreshes will see the same
	 * gpa, memslots generation, etc..., so they must be fully serialized.
	 */
	mutex_lock(&gpc->refresh_lock);

	write_lock_irq(&gpc->lock);

	if (!gpc->active) {
		ret = -EINVAL;
		goto out_unlock;
	}

	old_pfn = gpc->pfn;
	old_khva = gpc->khva - offset_in_page(gpc->khva);
	old_uhva = gpc->uhva;

	/*
	 * Re-resolve the uHVA first if the GPA changed, the memslots were
	 * updated, or the uHVA is simply invalid.
	 */
	if (gpc->gpa != gpa || gpc->generation != slots->generation ||
	    kvm_is_error_hva(gpc->uhva)) {
		gfn_t gfn = gpa_to_gfn(gpa);

		gpc->gpa = gpa;
		gpc->generation = slots->generation;
		gpc->memslot = __gfn_to_memslot(slots, gfn);
		gpc->uhva = gfn_to_hva_memslot(gpc->memslot, gfn);

		if (kvm_is_error_hva(gpc->uhva)) {
			ret = -EFAULT;
			goto out;
		}
	}

	/*
	 * If the userspace HVA changed or the PFN was already invalid,
	 * drop the lock and do the HVA to PFN lookup again.
	 */
	if (!gpc->valid || old_uhva != gpc->uhva) {
		ret = hva_to_pfn_retry(kvm, gpc);
	} else {
		/* If the HVA→PFN mapping was already valid, don't unmap it. */
		old_pfn = KVM_PFN_ERR_FAULT;
		old_khva = NULL;
		ret = 0;
	}

out:
	/*
	 * Invalidate the cache and purge the pfn/khva if the refresh failed.
	 * Some/all of the uhva, gpa, and memslot generation info may still be
	 * valid; leave it as is.
	 */
	if (ret) {
		gpc->valid = false;
		gpc->pfn = KVM_PFN_ERR_FAULT;
		gpc->khva = NULL;
	}

	/* Detect a pfn change before dropping the lock! */
	unmap_old = (old_pfn != gpc->pfn);

out_unlock:
	write_unlock_irq(&gpc->lock);

	mutex_unlock(&gpc->refresh_lock);

	/* Unmapping may sleep, so do it only after all locks are dropped. */
	if (unmap_old)
		gpc_unmap_khva(kvm, old_pfn, old_khva);

	return ret;
}
EXPORT_SYMBOL_GPL(kvm_gfn_to_pfn_cache_refresh);
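/*
 * A worked example of the single-page rule enforced at the top of refresh:
 * with PAGE_SIZE == 4096, a request for gpa == 0x10ffc with len == 8 gives
 * page_offset == 0xffc, so page_offset + len == 0x1004 > PAGE_SIZE and the
 * refresh fails with -EINVAL because the object would straddle two pages.
 * The same gpa with len <= 4 stays within the page and is accepted.
 */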
void kvm_gfn_to_pfn_cache_unmap(struct kvm *kvm, struct gfn_to_pfn_cache *gpc)
{
	void *old_khva;
	kvm_pfn_t old_pfn;

	mutex_lock(&gpc->refresh_lock);
	write_lock_irq(&gpc->lock);

	gpc->valid = false;

	old_khva = gpc->khva - offset_in_page(gpc->khva);
	old_pfn = gpc->pfn;

	/*
	 * We can leave the GPA → uHVA map cache intact but the PFN
	 * lookup will need to be redone even for the same page.
	 */
	gpc->khva = NULL;
	gpc->pfn = KVM_PFN_ERR_FAULT;

	write_unlock_irq(&gpc->lock);
	mutex_unlock(&gpc->refresh_lock);

	gpc_unmap_khva(kvm, old_pfn, old_khva);
}
EXPORT_SYMBOL_GPL(kvm_gfn_to_pfn_cache_unmap);

void kvm_gpc_init(struct gfn_to_pfn_cache *gpc)
{
	rwlock_init(&gpc->lock);
	mutex_init(&gpc->refresh_lock);
}
EXPORT_SYMBOL_GPL(kvm_gpc_init);

int kvm_gpc_activate(struct kvm *kvm, struct gfn_to_pfn_cache *gpc,
		     struct kvm_vcpu *vcpu, enum pfn_cache_usage usage,
		     gpa_t gpa, unsigned long len)
{
	WARN_ON_ONCE(!usage || (usage & KVM_GUEST_AND_HOST_USE_PFN) != usage);

	if (!gpc->active) {
		gpc->khva = NULL;
		gpc->pfn = KVM_PFN_ERR_FAULT;
		gpc->uhva = KVM_HVA_ERR_BAD;
		gpc->vcpu = vcpu;
		gpc->usage = usage;
		gpc->valid = false;

		spin_lock(&kvm->gpc_lock);
		list_add(&gpc->list, &kvm->gpc_list);
		spin_unlock(&kvm->gpc_lock);

		/*
		 * Activate the cache after adding it to the list; a concurrent
		 * refresh must not establish a mapping until the cache is
		 * reachable by mmu_notifier events.
		 */
		write_lock_irq(&gpc->lock);
		gpc->active = true;
		write_unlock_irq(&gpc->lock);
	}
	return kvm_gfn_to_pfn_cache_refresh(kvm, gpc, gpa, len);
}
EXPORT_SYMBOL_GPL(kvm_gpc_activate);

void kvm_gpc_deactivate(struct kvm *kvm, struct gfn_to_pfn_cache *gpc)
{
	if (gpc->active) {
		/*
		 * Deactivate the cache before removing it from the list; KVM
		 * must stall mmu_notifier events until all users go away, i.e.
		 * until gpc->lock is dropped and refresh is guaranteed to fail.
		 */
		write_lock_irq(&gpc->lock);
		gpc->active = false;
		write_unlock_irq(&gpc->lock);

		spin_lock(&kvm->gpc_lock);
		list_del(&gpc->list);
		spin_unlock(&kvm->gpc_lock);

		kvm_gfn_to_pfn_cache_unmap(kvm, gpc);
	}
}
EXPORT_SYMBOL_GPL(kvm_gpc_deactivate);
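/*
 * Lifecycle of a cache, as a minimal sketch (the usage flags, @gpa, and @len
 * are illustrative; error handling is elided):
 *
 *	struct gfn_to_pfn_cache gpc;
 *
 *	kvm_gpc_init(&gpc);
 *
 *	// Map @gpa and make the cache reachable by mmu_notifier events.
 *	if (kvm_gpc_activate(kvm, &gpc, vcpu, KVM_GUEST_AND_HOST_USE_PFN,
 *			     gpa, len))
 *		return;
 *
 *	// ... use the check()/refresh() loop sketched earlier to read or
 *	// write the page via gpc.khva ...
 *
 *	// Unmap and stop tracking; safe even if the initial refresh failed,
 *	// since activation precedes the refresh.
 *	kvm_gpc_deactivate(kvm, &gpc);
 */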