Lines matching full:gp — every reference to the identifier gp (a struct kvm_nested_guest pointer) in the powerpc KVM nested-hypervisor code. Each match shows the source line number, the matched text, and the enclosing function; "argument" and "local" mark how gp is bound at that site.

27 static void kvmhv_update_ptbl_cache(struct kvm_nested_guest *gp);
522 static void kvmhv_set_nested_ptbl(struct kvm_nested_guest *gp) in kvmhv_set_nested_ptbl() argument
527 __pa(gp->shadow_pgtable) | RADIX_PGD_INDEX_SIZE; in kvmhv_set_nested_ptbl()
528 kvmhv_set_ptbl_entry(gp->shadow_lpid, dw0, gp->process_table); in kvmhv_set_nested_ptbl()
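The matches at 522-528 are the function that installs a nested guest's partition-table entry: doubleword 0 points at the shadow page table the hypervisor maintains, doubleword 1 passes the L1 guest's process table through unchanged. A minimal sketch of the whole function, reconstructed around the matched fragments; the PATB_HR and radix__get_tree_size() terms in dw0 are assumptions, since the start of line 526's statement is not shown:

static void kvmhv_set_nested_ptbl(struct kvm_nested_guest *gp)
{
	unsigned long dw0;

	/* Real address of the shadow page table, plus radix geometry. */
	dw0 = PATB_HR | radix__get_tree_size() |
	      __pa(gp->shadow_pgtable) | RADIX_PGD_INDEX_SIZE;
	/* Doubleword 1 is the guest's own process table, unchanged. */
	kvmhv_set_ptbl_entry(gp->shadow_lpid, dw0, gp->process_table);
}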
566 struct kvm_nested_guest *gp; in kvmhv_copy_tofrom_guest_nested() local
587 gp = kvmhv_get_nested(vcpu->kvm, l1_lpid, false); in kvmhv_copy_tofrom_guest_nested()
588 if (!gp) { in kvmhv_copy_tofrom_guest_nested()
593 mutex_lock(&gp->tlb_lock); in kvmhv_copy_tofrom_guest_nested()
597 rc = __kvmhv_copy_tofrom_guest_radix(gp->shadow_lpid, pid, in kvmhv_copy_tofrom_guest_nested()
617 rc = __kvmhv_copy_tofrom_guest_radix(gp->shadow_lpid, pid, in kvmhv_copy_tofrom_guest_nested()
624 mutex_unlock(&gp->tlb_lock); in kvmhv_copy_tofrom_guest_nested()
625 kvmhv_put_nested(gp); in kvmhv_copy_tofrom_guest_nested()
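kvmhv_copy_tofrom_guest_nested() (566-625) shows the reference and locking pattern every nested operation follows: look the guest up without creating it, hold gp->tlb_lock for the duration of the access, then drop the reference. The two __kvmhv_copy_tofrom_guest_radix() calls at 597 and 617 presumably cover two cases of the copy (e.g. the two quadrants). The skeleton below keeps only that pattern; the argument marshalling is elided and the -EINVAL fallback is an assumption:

	gp = kvmhv_get_nested(vcpu->kvm, l1_lpid, false);
	if (!gp)
		return -EINVAL;		/* assumption: exact errno not shown */

	mutex_lock(&gp->tlb_lock);
	rc = __kvmhv_copy_tofrom_guest_radix(gp->shadow_lpid, pid,
					     eaddr, to, from, n);
	mutex_unlock(&gp->tlb_lock);

	kvmhv_put_nested(gp);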
636 * Caller must hold gp->tlb_lock.
638 static void kvmhv_update_ptbl_cache(struct kvm_nested_guest *gp) in kvmhv_update_ptbl_cache() argument
643 struct kvm *kvm = gp->l1_host; in kvmhv_update_ptbl_cache()
646 ptbl_addr = (kvm->arch.l1_ptcr & PRTB_MASK) + (gp->l1_lpid << 4); in kvmhv_update_ptbl_cache()
647 if (gp->l1_lpid < (1ul << ((kvm->arch.l1_ptcr & PRTS_MASK) + 12 - 4))) { in kvmhv_update_ptbl_cache()
654 gp->l1_gr_to_hr = 0; in kvmhv_update_ptbl_cache()
655 gp->process_table = 0; in kvmhv_update_ptbl_cache()
657 gp->l1_gr_to_hr = be64_to_cpu(ptbl_entry.patb0); in kvmhv_update_ptbl_cache()
658 gp->process_table = be64_to_cpu(ptbl_entry.patb1); in kvmhv_update_ptbl_cache()
660 kvmhv_set_nested_ptbl(gp); in kvmhv_update_ptbl_cache()
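kvmhv_update_ptbl_cache() (636-660) re-reads the L1 guest's partition-table entry for this nested LPID and caches both doublewords. The bound at 647 follows from the PATB layout: the PRTS field gives a table of 2^(PRTS+12) bytes, and at 16 bytes per entry that is 2^(PRTS+12-4) entries, which also explains the l1_lpid << 4 offset at 646. A sketch assembling the matched lines into the likely control flow; the kvm_read_guest() call and the SRCU locking around it are assumptions, since those lines never mention gp:

static void kvmhv_update_ptbl_cache(struct kvm_nested_guest *gp)
{
	struct patb_entry ptbl_entry;
	unsigned long ptbl_addr;
	struct kvm *kvm = gp->l1_host;
	int ret = -EFAULT;

	/* Each partition-table entry is 16 bytes, hence lpid << 4. */
	ptbl_addr = (kvm->arch.l1_ptcr & PRTB_MASK) + (gp->l1_lpid << 4);
	if (gp->l1_lpid < (1ul << ((kvm->arch.l1_ptcr & PRTS_MASK) + 12 - 4))) {
		int srcu_idx = srcu_read_lock(&kvm->srcu);

		ret = kvm_read_guest(kvm, ptbl_addr,
				     &ptbl_entry, sizeof(ptbl_entry));
		srcu_read_unlock(&kvm->srcu, srcu_idx);
	}
	if (ret) {
		/* Out of range or unreadable: treat as no translation. */
		gp->l1_gr_to_hr = 0;
		gp->process_table = 0;
	} else {
		gp->l1_gr_to_hr = be64_to_cpu(ptbl_entry.patb0);
		gp->process_table = be64_to_cpu(ptbl_entry.patb1);
	}
	kvmhv_set_nested_ptbl(gp);
}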
681 static void __add_nested(struct kvm *kvm, int lpid, struct kvm_nested_guest *gp) in __add_nested() argument
683 if (idr_replace(&kvm->arch.kvm_nested_guest_idr, gp, lpid)) in __add_nested()
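__add_nested() (681-683) publishes a guest by filling a preallocated NULL slot in kvm_nested_guest_idr with idr_replace(). Its lookup and removal companions don't appear in this list because they never touch a variable named gp; plausible shapes for them, labeled as assumptions, following the usual IDR idiom of reserving a slot first so the later publish under kvm->mmu_lock cannot fail or sleep:

static struct kvm_nested_guest *__find_nested(struct kvm *kvm, int lpid)
{
	return idr_find(&kvm->arch.kvm_nested_guest_idr, lpid);
}

/* Reserve the slot with a NULL pointer so __add_nested() can later
 * publish the guest under kvm->mmu_lock without allocating. */
static bool __prealloc_nested(struct kvm *kvm, int lpid)
{
	return idr_alloc(&kvm->arch.kvm_nested_guest_idr, NULL,
			 lpid, lpid + 1, GFP_KERNEL) == lpid;
}

static void __remove_nested(struct kvm *kvm, int lpid)
{
	idr_remove(&kvm->arch.kvm_nested_guest_idr, lpid);
}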
694 struct kvm_nested_guest *gp; in kvmhv_alloc_nested() local
697 gp = kzalloc(sizeof(*gp), GFP_KERNEL); in kvmhv_alloc_nested()
698 if (!gp) in kvmhv_alloc_nested()
700 gp->l1_host = kvm; in kvmhv_alloc_nested()
701 gp->l1_lpid = lpid; in kvmhv_alloc_nested()
702 mutex_init(&gp->tlb_lock); in kvmhv_alloc_nested()
703 gp->shadow_pgtable = pgd_alloc(kvm->mm); in kvmhv_alloc_nested()
704 if (!gp->shadow_pgtable) in kvmhv_alloc_nested()
709 gp->shadow_lpid = shadow_lpid; in kvmhv_alloc_nested()
710 gp->radix = 1; in kvmhv_alloc_nested()
712 memset(gp->prev_cpu, -1, sizeof(gp->prev_cpu)); in kvmhv_alloc_nested()
714 return gp; in kvmhv_alloc_nested()
717 pgd_free(kvm->mm, gp->shadow_pgtable); in kvmhv_alloc_nested()
719 kfree(gp); in kvmhv_alloc_nested()
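kvmhv_alloc_nested() (694-719) builds a fresh kvm_nested_guest: a zeroed struct, a shadow PGD taken from the host mm, and a newly allocated shadow LPID. Joining the fragments into one function; the kvmppc_alloc_lpid() step is an assumption, as that line has no gp in it:

static struct kvm_nested_guest *kvmhv_alloc_nested(struct kvm *kvm,
						   unsigned int lpid)
{
	struct kvm_nested_guest *gp;
	long shadow_lpid;

	gp = kzalloc(sizeof(*gp), GFP_KERNEL);
	if (!gp)
		return NULL;
	gp->l1_host = kvm;
	gp->l1_lpid = lpid;
	mutex_init(&gp->tlb_lock);
	gp->shadow_pgtable = pgd_alloc(kvm->mm);
	if (!gp->shadow_pgtable)
		goto out_free;
	shadow_lpid = kvmppc_alloc_lpid();	/* assumption: allocator name */
	if (shadow_lpid < 0)
		goto out_free2;
	gp->shadow_lpid = shadow_lpid;
	gp->radix = 1;

	/* No physical CPU has run this guest yet. */
	memset(gp->prev_cpu, -1, sizeof(gp->prev_cpu));

	return gp;

 out_free2:
	pgd_free(kvm->mm, gp->shadow_pgtable);
 out_free:
	kfree(gp);
	return NULL;
}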
726 static void kvmhv_release_nested(struct kvm_nested_guest *gp) in kvmhv_release_nested() argument
728 struct kvm *kvm = gp->l1_host; in kvmhv_release_nested()
730 if (gp->shadow_pgtable) { in kvmhv_release_nested()
736 kvmppc_free_pgtable_radix(kvm, gp->shadow_pgtable, in kvmhv_release_nested()
737 gp->shadow_lpid); in kvmhv_release_nested()
738 pgd_free(kvm->mm, gp->shadow_pgtable); in kvmhv_release_nested()
740 kvmhv_set_ptbl_entry(gp->shadow_lpid, 0, 0); in kvmhv_release_nested()
741 kvmppc_free_lpid(gp->shadow_lpid); in kvmhv_release_nested()
742 kfree(gp); in kvmhv_release_nested()
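kvmhv_release_nested() (726-742) is the final teardown once the refcount has reached zero: free the shadow page table, clear the partition-table entry, and return the shadow LPID. Assembled from the fragments; the comment fills the gap at 731-735 with its apparent intent:

static void kvmhv_release_nested(struct kvm_nested_guest *gp)
{
	struct kvm *kvm = gp->l1_host;

	if (gp->shadow_pgtable) {
		/*
		 * No vcpu is using this struct and kvmhv_get_nested()
		 * can no longer find it, so kvm->mmu_lock is not needed.
		 */
		kvmppc_free_pgtable_radix(kvm, gp->shadow_pgtable,
					  gp->shadow_lpid);
		pgd_free(kvm->mm, gp->shadow_pgtable);
	}
	kvmhv_set_ptbl_entry(gp->shadow_lpid, 0, 0);
	kvmppc_free_lpid(gp->shadow_lpid);
	kfree(gp);
}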
745 static void kvmhv_remove_nested(struct kvm_nested_guest *gp) in kvmhv_remove_nested() argument
747 struct kvm *kvm = gp->l1_host; in kvmhv_remove_nested()
748 int lpid = gp->l1_lpid; in kvmhv_remove_nested()
752 if (gp == __find_nested(kvm, lpid)) { in kvmhv_remove_nested()
754 --gp->refcnt; in kvmhv_remove_nested()
756 ref = gp->refcnt; in kvmhv_remove_nested()
759 kvmhv_release_nested(gp); in kvmhv_remove_nested()
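kvmhv_remove_nested() (745-759) unhooks the guest from the IDR and drops the reference the IDR held, all under kvm->mmu_lock so lookups and refcount changes stay consistent; the gp == __find_nested() check at 752 guards against the entry already having been removed by someone else. Sketch, with the mmu_lock calls and __remove_nested() as assumptions:

static void kvmhv_remove_nested(struct kvm_nested_guest *gp)
{
	struct kvm *kvm = gp->l1_host;
	int lpid = gp->l1_lpid;
	long ref;

	spin_lock(&kvm->mmu_lock);
	if (gp == __find_nested(kvm, lpid)) {
		__remove_nested(kvm, lpid);
		--gp->refcnt;		/* drop the IDR's reference */
	}
	ref = gp->refcnt;
	spin_unlock(&kvm->mmu_lock);
	if (ref == 0)
		kvmhv_release_nested(gp);
}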
771 struct kvm_nested_guest *gp; in kvmhv_release_all_nested() local
777 idr_for_each_entry(&kvm->arch.kvm_nested_guest_idr, gp, lpid) { in kvmhv_release_all_nested()
779 if (--gp->refcnt == 0) { in kvmhv_release_all_nested()
780 gp->next = freelist; in kvmhv_release_all_nested()
781 freelist = gp; in kvmhv_release_all_nested()
787 while ((gp = freelist) != NULL) { in kvmhv_release_all_nested()
788 freelist = gp->next; in kvmhv_release_all_nested()
789 kvmhv_release_nested(gp); in kvmhv_release_all_nested()
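kvmhv_release_all_nested() (771-789) cannot run the release work under the spinlock it must hold while walking the IDR, presumably because kvmhv_release_nested() does work (freeing page tables, updating the partition table) better done outside it. So guests whose refcount hits zero are chained onto a local freelist through gp->next and released after the lock is dropped. The core of that pattern, with the __remove_nested()/idr_destroy() steps as assumptions:

	spin_lock(&kvm->mmu_lock);
	idr_for_each_entry(&kvm->arch.kvm_nested_guest_idr, gp, lpid) {
		__remove_nested(kvm, lpid);		/* assumption */
		if (--gp->refcnt == 0) {
			/* Defer the actual release until the lock is gone. */
			gp->next = freelist;
			freelist = gp;
		}
	}
	idr_destroy(&kvm->arch.kvm_nested_guest_idr);	/* assumption */
	spin_unlock(&kvm->mmu_lock);

	while ((gp = freelist) != NULL) {
		freelist = gp->next;
		kvmhv_release_nested(gp);
	}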
798 /* caller must hold gp->tlb_lock */
799 static void kvmhv_flush_nested(struct kvm_nested_guest *gp) in kvmhv_flush_nested() argument
801 struct kvm *kvm = gp->l1_host; in kvmhv_flush_nested()
804 kvmppc_free_pgtable_radix(kvm, gp->shadow_pgtable, gp->shadow_lpid); in kvmhv_flush_nested()
806 kvmhv_flush_lpid(gp->shadow_lpid); in kvmhv_flush_nested()
807 kvmhv_update_ptbl_cache(gp); in kvmhv_flush_nested()
808 if (gp->l1_gr_to_hr == 0) in kvmhv_flush_nested()
809 kvmhv_remove_nested(gp); in kvmhv_flush_nested()
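kvmhv_flush_nested() (798-809) empties the shadow page table, flushes the hardware TLB for the shadow LPID, then re-reads the L1 partition-table entry; if L1 has since zeroed that entry (l1_gr_to_hr == 0), the nested guest no longer exists and is removed entirely. Sketch, with the mmu_lock span around the page-table free (the gap at 803/805) as an assumption:

/* caller must hold gp->tlb_lock */
static void kvmhv_flush_nested(struct kvm_nested_guest *gp)
{
	struct kvm *kvm = gp->l1_host;

	spin_lock(&kvm->mmu_lock);
	kvmppc_free_pgtable_radix(kvm, gp->shadow_pgtable, gp->shadow_lpid);
	spin_unlock(&kvm->mmu_lock);
	kvmhv_flush_lpid(gp->shadow_lpid);
	kvmhv_update_ptbl_cache(gp);
	if (gp->l1_gr_to_hr == 0)
		kvmhv_remove_nested(gp);
}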
815 struct kvm_nested_guest *gp, *newgp; in kvmhv_get_nested() local
821 gp = __find_nested(kvm, l1_lpid); in kvmhv_get_nested()
822 if (gp) in kvmhv_get_nested()
823 ++gp->refcnt; in kvmhv_get_nested()
826 if (gp || !create) in kvmhv_get_nested()
827 return gp; in kvmhv_get_nested()
839 gp = __find_nested(kvm, l1_lpid); in kvmhv_get_nested()
840 if (!gp) { in kvmhv_get_nested()
843 gp = newgp; in kvmhv_get_nested()
846 ++gp->refcnt; in kvmhv_get_nested()
852 return gp; in kvmhv_get_nested()
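kvmhv_get_nested() (815-852) is the lookup-or-create path. The allocation happens outside kvm->mmu_lock (it can sleep), then the IDR is re-checked under the lock so that a racing creator wins cleanly and the loser's allocation is released. Sketch of that flow; the range check on l1_lpid that precedes it, the __prealloc_nested() step, and the losing-race cleanup are assumptions, since those lines don't match gp:

struct kvm_nested_guest *kvmhv_get_nested(struct kvm *kvm, int l1_lpid,
					  bool create)
{
	struct kvm_nested_guest *gp, *newgp;

	spin_lock(&kvm->mmu_lock);
	gp = __find_nested(kvm, l1_lpid);
	if (gp)
		++gp->refcnt;
	spin_unlock(&kvm->mmu_lock);

	if (gp || !create)
		return gp;

	/* Allocate outside the lock, then re-check for a racing creator. */
	newgp = kvmhv_alloc_nested(kvm, l1_lpid);
	if (!newgp)
		return NULL;
	if (!__prealloc_nested(kvm, l1_lpid)) {		/* assumption */
		kvmhv_release_nested(newgp);
		return NULL;
	}

	spin_lock(&kvm->mmu_lock);
	gp = __find_nested(kvm, l1_lpid);
	if (!gp) {
		__add_nested(kvm, l1_lpid, newgp);
		++newgp->refcnt;	/* the IDR's own reference */
		gp = newgp;
		newgp = NULL;
	}
	++gp->refcnt;			/* the caller's reference */
	spin_unlock(&kvm->mmu_lock);

	if (newgp)			/* lost the race */
		kvmhv_release_nested(newgp);
	return gp;
}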
855 void kvmhv_put_nested(struct kvm_nested_guest *gp) in kvmhv_put_nested() argument
857 struct kvm *kvm = gp->l1_host; in kvmhv_put_nested()
861 ref = --gp->refcnt; in kvmhv_put_nested()
864 kvmhv_release_nested(gp); in kvmhv_put_nested()
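kvmhv_put_nested() (855-864) is the matching decrement: the refcount is sampled under kvm->mmu_lock, and the release, if any, runs after the lock is dropped:

void kvmhv_put_nested(struct kvm_nested_guest *gp)
{
	struct kvm *kvm = gp->l1_host;
	long ref;

	spin_lock(&kvm->mmu_lock);
	ref = --gp->refcnt;
	spin_unlock(&kvm->mmu_lock);
	if (ref == 0)
		kvmhv_release_nested(gp);
}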
870 struct kvm_nested_guest *gp; in find_kvm_nested_guest_pte() local
873 gp = __find_nested(kvm, lpid); in find_kvm_nested_guest_pte()
874 if (!gp) in find_kvm_nested_guest_pte()
879 pte = __find_linux_pte(gp->shadow_pgtable, ea, NULL, hshift); in find_kvm_nested_guest_pte()
972 struct kvm_nested_guest *gp; in kvmhv_remove_nest_rmap() local
979 gp = __find_nested(kvm, lpid); in kvmhv_remove_nest_rmap()
980 if (!gp) in kvmhv_remove_nest_rmap()
987 kvmppc_unmap_pte(kvm, ptep, gpa, shift, NULL, gp->shadow_lpid); in kvmhv_remove_nest_rmap()
1042 struct kvm_nested_guest *gp, in kvmhv_invalidate_shadow_pte() argument
1051 ptep = find_kvm_nested_guest_pte(kvm, gp->l1_lpid, gpa, &shift); in kvmhv_invalidate_shadow_pte()
1055 kvmppc_unmap_pte(kvm, ptep, gpa, shift, NULL, gp->shadow_lpid); in kvmhv_invalidate_shadow_pte()
1104 struct kvm_nested_guest *gp; in kvmhv_emulate_tlbie_tlb_addr() local
1118 gp = kvmhv_get_nested(kvm, lpid, false); in kvmhv_emulate_tlbie_tlb_addr()
1119 if (!gp) /* No such guest -> nothing to do */ in kvmhv_emulate_tlbie_tlb_addr()
1121 mutex_lock(&gp->tlb_lock); in kvmhv_emulate_tlbie_tlb_addr()
1125 kvmhv_invalidate_shadow_pte(vcpu, gp, addr, &shadow_shift); in kvmhv_emulate_tlbie_tlb_addr()
1131 mutex_unlock(&gp->tlb_lock); in kvmhv_emulate_tlbie_tlb_addr()
1132 kvmhv_put_nested(gp); in kvmhv_emulate_tlbie_tlb_addr()
1137 struct kvm_nested_guest *gp, int ric) in kvmhv_emulate_tlbie_lpid() argument
1141 mutex_lock(&gp->tlb_lock); in kvmhv_emulate_tlbie_lpid()
1146 kvmppc_free_pgtable_radix(kvm, gp->shadow_pgtable, in kvmhv_emulate_tlbie_lpid()
1147 gp->shadow_lpid); in kvmhv_emulate_tlbie_lpid()
1148 kvmhv_flush_lpid(gp->shadow_lpid); in kvmhv_emulate_tlbie_lpid()
1159 kvmhv_flush_nested(gp); in kvmhv_emulate_tlbie_lpid()
1164 mutex_unlock(&gp->tlb_lock); in kvmhv_emulate_tlbie_lpid()
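kvmhv_emulate_tlbie_lpid() (1137-1164) dispatches on the tlbie RIC field under gp->tlb_lock: RIC=0 invalidates only the shadow TLB, RIC=1 (PWC) is a no-op because page-walk caches aren't shadowed, and RIC=2 additionally drops the cached partition-table entry via kvmhv_flush_nested() at 1159. Sketch; the switch structure and the mmu_lock span are assumptions inferred from the line-number gaps:

static void kvmhv_emulate_tlbie_lpid(struct kvm_vcpu *vcpu,
				     struct kvm_nested_guest *gp, int ric)
{
	struct kvm *kvm = vcpu->kvm;

	mutex_lock(&gp->tlb_lock);
	switch (ric) {
	case 0:		/* invalidate TLB */
		spin_lock(&kvm->mmu_lock);
		kvmppc_free_pgtable_radix(kvm, gp->shadow_pgtable,
					  gp->shadow_lpid);
		kvmhv_flush_lpid(gp->shadow_lpid);
		spin_unlock(&kvm->mmu_lock);
		break;
	case 1:		/* invalidate PWC: not cached, nothing to do */
		break;
	case 2:		/* invalidate TLB, PWC and cached partition table */
		kvmhv_flush_nested(gp);
		break;
	}
	mutex_unlock(&gp->tlb_lock);
}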
1170 struct kvm_nested_guest *gp; in kvmhv_emulate_tlbie_all_lpid() local
1174 idr_for_each_entry(&kvm->arch.kvm_nested_guest_idr, gp, lpid) { in kvmhv_emulate_tlbie_all_lpid()
1176 kvmhv_emulate_tlbie_lpid(vcpu, gp, ric); in kvmhv_emulate_tlbie_all_lpid()
1186 struct kvm_nested_guest *gp; in kvmhv_emulate_priv_tlbie() local
1222 gp = kvmhv_get_nested(kvm, lpid, false); in kvmhv_emulate_priv_tlbie()
1223 if (gp) { in kvmhv_emulate_priv_tlbie()
1224 kvmhv_emulate_tlbie_lpid(vcpu, gp, ric); in kvmhv_emulate_priv_tlbie()
1225 kvmhv_put_nested(gp); in kvmhv_emulate_priv_tlbie()
1260 struct kvm_nested_guest *gp; in do_tlb_invalidate_nested_all() local
1262 gp = kvmhv_get_nested(kvm, lpid, false); in do_tlb_invalidate_nested_all()
1263 if (gp) { in do_tlb_invalidate_nested_all()
1264 kvmhv_emulate_tlbie_lpid(vcpu, gp, ric); in do_tlb_invalidate_nested_all()
1265 kvmhv_put_nested(gp); in do_tlb_invalidate_nested_all()
1365 struct kvm_nested_guest *gp, in kvmhv_translate_addr_nested() argument
1372 ret = kvmppc_mmu_walk_radix_tree(vcpu, n_gpa, gpte_p, gp->l1_gr_to_hr, in kvmhv_translate_addr_nested()
1427 struct kvm_nested_guest *gp, in kvmhv_handle_nested_set_rc() argument
1455 n_gpa, gp->l1_lpid); in kvmhv_handle_nested_set_rc()
1490 /* called with gp->tlb_lock held */
1492 struct kvm_nested_guest *gp) in __kvmhv_nested_page_fault() argument
1509 if (!gp->l1_gr_to_hr) { in __kvmhv_nested_page_fault()
1510 kvmhv_update_ptbl_cache(gp); in __kvmhv_nested_page_fault()
1511 if (!gp->l1_gr_to_hr) in __kvmhv_nested_page_fault()
1520 ret = kvmhv_translate_addr_nested(vcpu, gp, n_gpa, dsisr, &gpte); in __kvmhv_nested_page_fault()
1536 ret = kvmhv_handle_nested_set_rc(vcpu, gp, n_gpa, gpte, dsisr); in __kvmhv_nested_page_fault()
1648 (((unsigned long) gp->l1_lpid) << RMAP_NESTED_LPID_SHIFT); in __kvmhv_nested_page_fault()
1650 ret = kvmppc_create_pte(kvm, gp->shadow_pgtable, pte, n_gpa, level, in __kvmhv_nested_page_fault()
1651 mmu_seq, gp->shadow_lpid, rmapp, &n_rmap); in __kvmhv_nested_page_fault()
1659 kvmhv_invalidate_shadow_pte(vcpu, gp, n_gpa, NULL); in __kvmhv_nested_page_fault()
1665 struct kvm_nested_guest *gp = vcpu->arch.nested; in kvmhv_nested_page_fault() local
1668 mutex_lock(&gp->tlb_lock); in kvmhv_nested_page_fault()
1669 ret = __kvmhv_nested_page_fault(vcpu, gp); in kvmhv_nested_page_fault()
1670 mutex_unlock(&gp->tlb_lock); in kvmhv_nested_page_fault()
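The fault path at 1490-1659 walks through its stages: refresh the cached partition table if l1_gr_to_hr is stale (1509-1511), translate the nested address through the L1 radix tree (1520), update reference/change bits (1536), then insert the shadow PTE with an rmap entry encoding the L1 LPID (1648-1651), invalidating the shadow PTE on failure (1659). kvmhv_nested_page_fault() (1665-1670) is the thin wrapper that serializes all of this against tlbie emulation by taking gp->tlb_lock around the real handler; a sketch assuming the return type is long int:

long int kvmhv_nested_page_fault(struct kvm_vcpu *vcpu)
{
	struct kvm_nested_guest *gp = vcpu->arch.nested;
	long int ret;

	mutex_lock(&gp->tlb_lock);
	ret = __kvmhv_nested_page_fault(vcpu, gp);
	mutex_unlock(&gp->tlb_lock);
	return ret;
}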