/*
 * kvm asynchronous fault support
 *
 * Copyright 2010 Red Hat, Inc.
 *
 * Author:
 *	Gleb Natapov <gleb@redhat.com>
 *
 * This file is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA.
 */

#include <linux/kvm_host.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/mmu_context.h>
#include <linux/sched/mm.h>

#include "async_pf.h"
#include <trace/events/kvm.h>

/*
 * With CONFIG_KVM_ASYNC_PF_SYNC the architecture injects the "page present"
 * event directly from the workqueue worker; otherwise it is injected later,
 * from kvm_check_async_pf_completion() in vcpu context.
 */
static inline void kvm_async_page_present_sync(struct kvm_vcpu *vcpu,
					       struct kvm_async_pf *work)
{
#ifdef CONFIG_KVM_ASYNC_PF_SYNC
	kvm_arch_async_page_present(vcpu, work);
#endif
}

static inline void kvm_async_page_present_async(struct kvm_vcpu *vcpu,
						struct kvm_async_pf *work)
{
#ifndef CONFIG_KVM_ASYNC_PF_SYNC
	kvm_arch_async_page_present(vcpu, work);
#endif
}

static struct kmem_cache *async_pf_cache;

int kvm_async_pf_init(void)
{
	async_pf_cache = KMEM_CACHE(kvm_async_pf, 0);

	if (!async_pf_cache)
		return -ENOMEM;

	return 0;
}

void kvm_async_pf_deinit(void)
{
	kmem_cache_destroy(async_pf_cache);
	async_pf_cache = NULL;
}

void kvm_async_pf_vcpu_init(struct kvm_vcpu *vcpu)
{
	INIT_LIST_HEAD(&vcpu->async_pf.done);
	INIT_LIST_HEAD(&vcpu->async_pf.queue);
	spin_lock_init(&vcpu->async_pf.lock);
}

/*
 * Runs on the global workqueue: fault the page in on behalf of the guest,
 * then move the work item onto vcpu->async_pf.done and wake the vcpu.
 */
static void async_pf_execute(struct work_struct *work)
{
	struct kvm_async_pf *apf =
		container_of(work, struct kvm_async_pf, work);
	struct mm_struct *mm = apf->mm;
	struct kvm_vcpu *vcpu = apf->vcpu;
	unsigned long addr = apf->addr;
	gva_t gva = apf->gva;
	int locked = 1;

	might_sleep();

	/*
	 * This work is run asynchronously to the task which owns
	 * mm and might be done in another context, so we must
	 * access remotely.
	 */
	down_read(&mm->mmap_sem);
	get_user_pages_remote(NULL, mm, addr, 1, FOLL_WRITE, NULL, NULL,
			      &locked);
	if (locked)
		up_read(&mm->mmap_sem);

	kvm_async_page_present_sync(vcpu, apf);

	spin_lock(&vcpu->async_pf.lock);
	list_add_tail(&apf->link, &vcpu->async_pf.done);
	apf->vcpu = NULL;
	spin_unlock(&vcpu->async_pf.lock);

	/*
	 * apf may be freed by kvm_check_async_pf_completion() after
	 * this point
	 */

	trace_kvm_async_pf_completed(addr, gva);

	/*
	 * This memory barrier pairs with prepare_to_wait's set_current_state()
	 */
	smp_mb();
	if (swait_active(&vcpu->wq))
		swake_up(&vcpu->wq);

	mmput(mm);
	kvm_put_kvm(vcpu->kvm);
}

void kvm_clear_async_pf_completion_queue(struct kvm_vcpu *vcpu)
{
	spin_lock(&vcpu->async_pf.lock);

	/* cancel outstanding work queue item */
	while (!list_empty(&vcpu->async_pf.queue)) {
		struct kvm_async_pf *work =
			list_first_entry(&vcpu->async_pf.queue,
					 typeof(*work), queue);
		list_del(&work->queue);

		/*
		 * We know it's present in vcpu->async_pf.done, do
		 * nothing here.
		 */
		if (!work->vcpu)
			continue;

		spin_unlock(&vcpu->async_pf.lock);
#ifdef CONFIG_KVM_ASYNC_PF_SYNC
		flush_work(&work->work);
#else
		if (cancel_work_sync(&work->work)) {
			mmput(work->mm);
			kvm_put_kvm(vcpu->kvm); /* == work->vcpu->kvm */
			kmem_cache_free(async_pf_cache, work);
		}
#endif
		spin_lock(&vcpu->async_pf.lock);
	}

	while (!list_empty(&vcpu->async_pf.done)) {
		struct kvm_async_pf *work =
			list_first_entry(&vcpu->async_pf.done,
					 typeof(*work), link);
		list_del(&work->link);
		kmem_cache_free(async_pf_cache, work);
	}
	spin_unlock(&vcpu->async_pf.lock);

	vcpu->async_pf.queued = 0;
}

void kvm_check_async_pf_completion(struct kvm_vcpu *vcpu)
{
	struct kvm_async_pf *work;

	while (!list_empty_careful(&vcpu->async_pf.done) &&
	       kvm_arch_can_inject_async_page_present(vcpu)) {
		spin_lock(&vcpu->async_pf.lock);
		work = list_first_entry(&vcpu->async_pf.done, typeof(*work),
					link);
		list_del(&work->link);
		spin_unlock(&vcpu->async_pf.lock);

		kvm_arch_async_page_ready(vcpu, work);
		kvm_async_page_present_async(vcpu, work);

		list_del(&work->queue);
		vcpu->async_pf.queued--;
		kmem_cache_free(async_pf_cache, work);
	}
}

int kvm_setup_async_pf(struct kvm_vcpu *vcpu, gva_t gva, unsigned long hva,
		       struct kvm_arch_async_pf *arch)
{
	struct kvm_async_pf *work;

	if (vcpu->async_pf.queued >= ASYNC_PF_PER_VCPU)
		return 0;

	/* setup delayed work */

	/*
	 * do alloc nowait: if we are going to sleep anyway, we
	 * may as well sleep faulting in the page
	 */
	work = kmem_cache_zalloc(async_pf_cache, GFP_NOWAIT | __GFP_NOWARN);
	if (!work)
		return 0;

	work->wakeup_all = false;
	work->vcpu = vcpu;
	work->gva = gva;
	work->addr = hva;
	work->arch = *arch;
	work->mm = current->mm;
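	/*
	 * Pin the mm and the VM: both references are dropped in
	 * async_pf_execute() on success, or on the retry_sync error
	 * path below.
	 */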
	mmget(work->mm);
	kvm_get_kvm(work->vcpu->kvm);

	/*
	 * this can't really happen; otherwise gfn_to_pfn_async
	 * would succeed
	 */
	if (unlikely(kvm_is_error_hva(work->addr)))
		goto retry_sync;

	INIT_WORK(&work->work, async_pf_execute);
	if (!schedule_work(&work->work))
		goto retry_sync;

	list_add_tail(&work->queue, &vcpu->async_pf.queue);
	vcpu->async_pf.queued++;
	kvm_arch_async_page_not_present(vcpu, work);
	return 1;
retry_sync:
	kvm_put_kvm(work->vcpu->kvm);
	mmput(work->mm);
	kmem_cache_free(async_pf_cache, work);
	return 0;
}

int kvm_async_pf_wakeup_all(struct kvm_vcpu *vcpu)
{
	struct kvm_async_pf *work;

	if (!list_empty_careful(&vcpu->async_pf.done))
		return 0;

	work = kmem_cache_zalloc(async_pf_cache, GFP_ATOMIC);
	if (!work)
		return -ENOMEM;

	work->wakeup_all = true;
	INIT_LIST_HEAD(&work->queue); /* for list_del to work */

	spin_lock(&vcpu->async_pf.lock);
	list_add_tail(&work->link, &vcpu->async_pf.done);
	spin_unlock(&vcpu->async_pf.lock);

	vcpu->async_pf.queued++;
	return 0;
}