Lines Matching refs:arch

263 mutex_lock(&kvm->arch.uvmem_lock); in kvmppc_uvmem_slot_init()
264 list_add(&p->list, &kvm->arch.uvmem_pfns); in kvmppc_uvmem_slot_init()
265 mutex_unlock(&kvm->arch.uvmem_lock); in kvmppc_uvmem_slot_init()
277 mutex_lock(&kvm->arch.uvmem_lock); in kvmppc_uvmem_slot_free()
278 list_for_each_entry_safe(p, next, &kvm->arch.uvmem_pfns, list) { in kvmppc_uvmem_slot_free()
286 mutex_unlock(&kvm->arch.uvmem_lock); in kvmppc_uvmem_slot_free()
294 list_for_each_entry(p, &kvm->arch.uvmem_pfns, list) { in kvmppc_mark_gfn()
338 list_for_each_entry(p, &kvm->arch.uvmem_pfns, list) { in kvmppc_gfn_is_uvmem_pfn()
368 list_for_each_entry(iter, &kvm->arch.uvmem_pfns, list) in kvmppc_next_nontransitioned_gfn()
433 uv_unregister_mem_slot(kvm->arch.lpid, memslot->id); in __kvmppc_uvmem_memslot_delete()
449 ret = uv_register_mem_slot(kvm->arch.lpid, in __kvmppc_uvmem_memslot_create()
472 kvm->arch.secure_guest = KVMPPC_SECURE_INIT_START; in kvmppc_h_svm_init_start()
482 if (!kvm->arch.svm_enabled) in kvmppc_h_svm_init_start()
568 ret = uv_page_out(kvm->arch.lpid, pfn << page_shift, in __kvmppc_svm_page_out()
594 mutex_lock(&kvm->arch.uvmem_lock); in kvmppc_svm_page_out()
597 mutex_unlock(&kvm->arch.uvmem_lock); in kvmppc_svm_page_out()
636 mutex_lock(&kvm->arch.uvmem_lock); in kvmppc_uvmem_drop_pages()
653 mutex_unlock(&kvm->arch.uvmem_lock); in kvmppc_uvmem_drop_pages()
668 if (!(kvm->arch.secure_guest & KVMPPC_SECURE_INIT_START)) in kvmppc_h_svm_init_abort()
671 if (kvm->arch.secure_guest & KVMPPC_SECURE_INIT_DONE) in kvmppc_h_svm_init_abort()
681 kvm->arch.secure_guest = 0; in kvmppc_h_svm_init_abort()
682 uv_svm_terminate(kvm->arch.lpid); in kvmppc_h_svm_init_abort()
780 ret = uv_page_in(kvm->arch.lpid, pfn << page_shift, in kvmppc_svm_page_in()
803 mutex_lock(&kvm->arch.uvmem_lock); in kvmppc_uv_migrate_mem_slot()
825 mutex_unlock(&kvm->arch.uvmem_lock); in kvmppc_uv_migrate_mem_slot()
837 if (!(kvm->arch.secure_guest & KVMPPC_SECURE_INIT_START)) in kvmppc_h_svm_init_done()
860 kvm->arch.secure_guest |= KVMPPC_SECURE_INIT_DONE; in kvmppc_h_svm_init_done()
861 pr_info("LPID %d went secure\n", kvm->arch.lpid); in kvmppc_h_svm_init_done()
890 mutex_lock(&kvm->arch.uvmem_lock); in kvmppc_share_page()
903 mutex_unlock(&kvm->arch.uvmem_lock); in kvmppc_share_page()
908 mutex_lock(&kvm->arch.uvmem_lock); in kvmppc_share_page()
918 if (!uv_page_in(kvm->arch.lpid, pfn << page_shift, gpa, 0, in kvmppc_share_page()
924 mutex_unlock(&kvm->arch.uvmem_lock); in kvmppc_share_page()
946 if (!(kvm->arch.secure_guest & KVMPPC_SECURE_INIT_START)) in kvmppc_h_svm_page_in()
966 mutex_lock(&kvm->arch.uvmem_lock); in kvmppc_h_svm_page_in()
983 mutex_unlock(&kvm->arch.uvmem_lock); in kvmppc_h_svm_page_in()
1055 if (!(kvm->arch.secure_guest & KVMPPC_SECURE_INIT_START)) in kvmppc_h_svm_page_out()
1093 mutex_lock(&kvm->arch.uvmem_lock); in kvmppc_send_page_to_uv()
1097 ret = uv_page_in(kvm->arch.lpid, pfn << PAGE_SHIFT, gfn << PAGE_SHIFT, in kvmppc_send_page_to_uv()
1101 mutex_unlock(&kvm->arch.uvmem_lock); in kvmppc_send_page_to_uv()