/*
 * kvm asynchronous fault support
 *
 * Copyright 2010 Red Hat, Inc.
 *
 * Author:
 *	Gleb Natapov <gleb@redhat.com>
 *
 * This file is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA.
 */

#include <linux/kvm_host.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/mmu_context.h>

#include "async_pf.h"
#include <trace/events/kvm.h>

static struct kmem_cache *async_pf_cache;

int kvm_async_pf_init(void)
{
	async_pf_cache = KMEM_CACHE(kvm_async_pf, 0);

	if (!async_pf_cache)
		return -ENOMEM;

	return 0;
}

void kvm_async_pf_deinit(void)
{
	if (async_pf_cache)
		kmem_cache_destroy(async_pf_cache);
	async_pf_cache = NULL;
}

void kvm_async_pf_vcpu_init(struct kvm_vcpu *vcpu)
{
	INIT_LIST_HEAD(&vcpu->async_pf.done);
	INIT_LIST_HEAD(&vcpu->async_pf.queue);
	spin_lock_init(&vcpu->async_pf.lock);
}

static void async_pf_execute(struct work_struct *work)
{
	struct page *page = NULL;
	struct kvm_async_pf *apf =
		container_of(work, struct kvm_async_pf, work);
	struct mm_struct *mm = apf->mm;
	struct kvm_vcpu *vcpu = apf->vcpu;
	unsigned long addr = apf->addr;
	gva_t gva = apf->gva;

	might_sleep();

	use_mm(mm);
	down_read(&mm->mmap_sem);
	get_user_pages(current, mm, addr, 1, 1, 0, &page, NULL);
	up_read(&mm->mmap_sem);
	unuse_mm(mm);

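	/*
	 * Publish the completed fault on the vcpu's done list under
	 * async_pf.lock; kvm_check_async_pf_completion() consumes
	 * entries from there.
	 */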
	spin_lock(&vcpu->async_pf.lock);
	list_add_tail(&apf->link, &vcpu->async_pf.done);
	apf->page = page;
	apf->done = true;
	spin_unlock(&vcpu->async_pf.lock);

	/*
	 * apf may be freed by kvm_check_async_pf_completion() after
	 * this point
	 */

	trace_kvm_async_pf_completed(addr, page, gva);

	if (waitqueue_active(&vcpu->wq))
		wake_up_interruptible(&vcpu->wq);

	mmdrop(mm);
	kvm_put_kvm(vcpu->kvm);
}

void kvm_clear_async_pf_completion_queue(struct kvm_vcpu *vcpu)
{
	/* cancel outstanding work queue item */
	while (!list_empty(&vcpu->async_pf.queue)) {
		struct kvm_async_pf *work =
			list_entry(vcpu->async_pf.queue.next,
				   typeof(*work), queue);
		cancel_work_sync(&work->work);
		list_del(&work->queue);
		if (!work->done) /* work was canceled */
			kmem_cache_free(async_pf_cache, work);
	}

	spin_lock(&vcpu->async_pf.lock);
	while (!list_empty(&vcpu->async_pf.done)) {
		struct kvm_async_pf *work =
			list_entry(vcpu->async_pf.done.next,
				   typeof(*work), link);
		list_del(&work->link);
		if (work->page)
			put_page(work->page);
		kmem_cache_free(async_pf_cache, work);
	}
	spin_unlock(&vcpu->async_pf.lock);

	vcpu->async_pf.queued = 0;
}

void kvm_check_async_pf_completion(struct kvm_vcpu *vcpu)
{
	struct kvm_async_pf *work;

	while (!list_empty_careful(&vcpu->async_pf.done) &&
	       kvm_arch_can_inject_async_page_present(vcpu)) {
		spin_lock(&vcpu->async_pf.lock);
		work = list_first_entry(&vcpu->async_pf.done, typeof(*work),
					link);
		list_del(&work->link);
		spin_unlock(&vcpu->async_pf.lock);

		if (work->page)
			kvm_arch_async_page_ready(vcpu, work);
		kvm_arch_async_page_present(vcpu, work);

		list_del(&work->queue);
		vcpu->async_pf.queued--;
		if (work->page)
			put_page(work->page);
		kmem_cache_free(async_pf_cache, work);
	}
}

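/*
 * Queue an asynchronous fault for @gfn: take references on the mm and
 * the VM so they outlive the worker, hand the host-side fault to a
 * workqueue, and tell the guest the page is not present yet.  Returns
 * 1 if the fault was queued and 0 if the caller must fall back to a
 * synchronous fault.
 */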
int kvm_setup_async_pf(struct kvm_vcpu *vcpu, gva_t gva, gfn_t gfn,
		       struct kvm_arch_async_pf *arch)
{
	struct kvm_async_pf *work;

	if (vcpu->async_pf.queued >= ASYNC_PF_PER_VCPU)
		return 0;

	/* setup delayed work */

	/*
	 * Allocate with GFP_NOWAIT: if we are going to sleep anyway, we
	 * may as well sleep faulting the page in synchronously.
	 */
	work = kmem_cache_zalloc(async_pf_cache, GFP_NOWAIT);
	if (!work)
		return 0;

	work->page = NULL;
	work->done = false;
	work->vcpu = vcpu;
	work->gva = gva;
	work->addr = gfn_to_hva(vcpu->kvm, gfn);
	work->arch = *arch;
	work->mm = current->mm;
	atomic_inc(&work->mm->mm_count);
	kvm_get_kvm(work->vcpu->kvm);

	/*
	 * This can't really happen; otherwise gfn_to_pfn_async()
	 * would have succeeded.
	 */
	if (unlikely(kvm_is_error_hva(work->addr)))
		goto retry_sync;

	INIT_WORK(&work->work, async_pf_execute);
	if (!schedule_work(&work->work))
		goto retry_sync;

	list_add_tail(&work->queue, &vcpu->async_pf.queue);
	vcpu->async_pf.queued++;
	kvm_arch_async_page_not_present(vcpu, work);
	return 1;
retry_sync:
	kvm_put_kvm(work->vcpu->kvm);
	mmdrop(work->mm);
	kmem_cache_free(async_pf_cache, work);
	return 0;
}

int kvm_async_pf_wakeup_all(struct kvm_vcpu *vcpu)
{
	struct kvm_async_pf *work;

	if (!list_empty(&vcpu->async_pf.done))
		return 0;

	work = kmem_cache_zalloc(async_pf_cache, GFP_ATOMIC);
	if (!work)
		return -ENOMEM;

	work->page = bad_page;
	get_page(bad_page);
	INIT_LIST_HEAD(&work->queue); /* for list_del to work */

	list_add_tail(&work->link, &vcpu->async_pf.done);
	vcpu->async_pf.queued++;
	return 0;
}