Lines matching "kvm"
1 // SPDX-License-Identifier: GPL-2.0-only
9 #include <linux/kvm.h>
23 #include <asm/book3s/64/mmu-hash.h>
26 #include <asm/ppc-opcode.h>
28 #include <asm/pte-walk.h>
47 static long kvmppc_virtmode_do_h_enter(struct kvm *kvm, unsigned long flags,
52 /* These fields read-only after init */
53 struct kvm *kvm; member
57 /* These fields protected by kvm->arch.mmu_setup_lock */
61 * -EBUSY allocation is in progress,
66 /* Private to the work thread, until error != -EBUSY,
67 * then protected by kvm->arch.mmu_setup_lock.
81 return -EINVAL; in kvmppc_allocate_hpt()
83 page = kvm_alloc_hpt_cma(1ul << (order - PAGE_SHIFT)); in kvmppc_allocate_hpt()
92 |__GFP_NOWARN, order - PAGE_SHIFT); in kvmppc_allocate_hpt()
95 return -ENOMEM; in kvmppc_allocate_hpt()
98 npte = 1ul << (order - 4); in kvmppc_allocate_hpt()
104 kvm_free_hpt_cma(page, 1 << (order - PAGE_SHIFT)); in kvmppc_allocate_hpt()
106 free_pages(hpt, order - PAGE_SHIFT); in kvmppc_allocate_hpt()
107 return -ENOMEM; in kvmppc_allocate_hpt()
110 info->order = order; in kvmppc_allocate_hpt()
111 info->virt = hpt; in kvmppc_allocate_hpt()
112 info->cma = cma; in kvmppc_allocate_hpt()
113 info->rev = rev; in kvmppc_allocate_hpt()
118 void kvmppc_set_hpt(struct kvm *kvm, struct kvm_hpt_info *info) in kvmppc_set_hpt() argument
120 atomic64_set(&kvm->arch.mmio_update, 0); in kvmppc_set_hpt()
121 kvm->arch.hpt = *info; in kvmppc_set_hpt()
122 kvm->arch.sdr1 = __pa(info->virt) | (info->order - 18); in kvmppc_set_hpt()
124 pr_debug("KVM guest htab at %lx (order %ld), LPID %x\n", in kvmppc_set_hpt()
125 info->virt, (long)info->order, kvm->arch.lpid); in kvmppc_set_hpt()
128 int kvmppc_alloc_reset_hpt(struct kvm *kvm, int order) in kvmppc_alloc_reset_hpt() argument
130 int err = -EBUSY; in kvmppc_alloc_reset_hpt()
133 mutex_lock(&kvm->arch.mmu_setup_lock); in kvmppc_alloc_reset_hpt()
134 if (kvm->arch.mmu_ready) { in kvmppc_alloc_reset_hpt()
135 kvm->arch.mmu_ready = 0; in kvmppc_alloc_reset_hpt()
138 if (atomic_read(&kvm->arch.vcpus_running)) { in kvmppc_alloc_reset_hpt()
139 kvm->arch.mmu_ready = 1; in kvmppc_alloc_reset_hpt()
143 if (kvm_is_radix(kvm)) { in kvmppc_alloc_reset_hpt()
144 err = kvmppc_switch_mmu_to_hpt(kvm); in kvmppc_alloc_reset_hpt()
149 if (kvm->arch.hpt.order == order) { in kvmppc_alloc_reset_hpt()
153 memset((void *)kvm->arch.hpt.virt, 0, 1ul << order); in kvmppc_alloc_reset_hpt()
155 * Reset all the reverse-mapping chains for all memslots in kvmppc_alloc_reset_hpt()
157 kvmppc_rmap_reset(kvm); in kvmppc_alloc_reset_hpt()
162 if (kvm->arch.hpt.virt) { in kvmppc_alloc_reset_hpt()
163 kvmppc_free_hpt(&kvm->arch.hpt); in kvmppc_alloc_reset_hpt()
164 kvmppc_rmap_reset(kvm); in kvmppc_alloc_reset_hpt()
170 kvmppc_set_hpt(kvm, &info); in kvmppc_alloc_reset_hpt()
175 cpumask_setall(&kvm->arch.need_tlb_flush); in kvmppc_alloc_reset_hpt()
177 mutex_unlock(&kvm->arch.mmu_setup_lock); in kvmppc_alloc_reset_hpt()
183 vfree(info->rev); in kvmppc_free_hpt()
184 info->rev = NULL; in kvmppc_free_hpt()
185 if (info->cma) in kvmppc_free_hpt()
186 kvm_free_hpt_cma(virt_to_page((void *)info->virt), in kvmppc_free_hpt()
187 1 << (info->order - PAGE_SHIFT)); in kvmppc_free_hpt()
188 else if (info->virt) in kvmppc_free_hpt()
189 free_pages(info->virt, info->order - PAGE_SHIFT); in kvmppc_free_hpt()
190 info->virt = 0; in kvmppc_free_hpt()
191 info->order = 0; in kvmppc_free_hpt()
217 struct kvm *kvm = vcpu->kvm; in kvmppc_map_vrma() local
220 npages = memslot->npages >> (porder - PAGE_SHIFT); in kvmppc_map_vrma()
223 if (npages > 1ul << (40 - porder)) in kvmppc_map_vrma()
224 npages = 1ul << (40 - porder); in kvmppc_map_vrma()
226 if (npages > kvmppc_hpt_mask(&kvm->arch.hpt) + 1) in kvmppc_map_vrma()
227 npages = kvmppc_hpt_mask(&kvm->arch.hpt) + 1; in kvmppc_map_vrma()
229 hp0 = HPTE_V_1TB_SEG | (VRMA_VSID << (40 - 16)) | in kvmppc_map_vrma()
238 & kvmppc_hpt_mask(&kvm->arch.hpt); in kvmppc_map_vrma()
248 ret = kvmppc_virtmode_do_h_enter(kvm, H_EXACT, hash, hp_v, hp_r, in kvmppc_map_vrma()
251 pr_err("KVM: map_vrma at %lx failed, ret=%ld\n", in kvmppc_map_vrma()
263 return -EINVAL; in kvmppc_mmu_hv_init()
267 return -EINVAL; in kvmppc_mmu_hv_init()
274 /* POWER7 has 10-bit LPIDs, POWER8 has 12-bit LPIDs */ in kvmppc_mmu_hv_init()
284 nr_lpids -= 1; in kvmppc_mmu_hv_init()
292 static long kvmppc_virtmode_do_h_enter(struct kvm *kvm, unsigned long flags, in kvmppc_virtmode_do_h_enter() argument
299 ret = kvmppc_do_h_enter(kvm, flags, pte_index, pteh, ptel, in kvmppc_virtmode_do_h_enter()
300 kvm->mm->pgd, false, pte_idx_ret); in kvmppc_virtmode_do_h_enter()
304 pr_err("KVM: Oops, kvmppc_h_enter returned too hard!\n"); in kvmppc_virtmode_do_h_enter()
317 for (i = 0; i < vcpu->arch.slb_nr; i++) { in kvmppc_mmu_book3s_hv_find_slbe()
318 if (!(vcpu->arch.slb[i].orige & SLB_ESID_V)) in kvmppc_mmu_book3s_hv_find_slbe()
321 if (vcpu->arch.slb[i].origv & SLB_VSID_B_1T) in kvmppc_mmu_book3s_hv_find_slbe()
326 if (((vcpu->arch.slb[i].orige ^ eaddr) & mask) == 0) in kvmppc_mmu_book3s_hv_find_slbe()
327 return &vcpu->arch.slb[i]; in kvmppc_mmu_book3s_hv_find_slbe()
337 ra_mask = kvmppc_actual_pgsz(v, r) - 1; in kvmppc_mmu_get_real_addr()
344 struct kvm *kvm = vcpu->kvm; in kvmppc_mmu_book3s_64_hv_xlate() local
353 if (kvm_is_radix(vcpu->kvm)) in kvmppc_mmu_book3s_64_hv_xlate()
360 return -EINVAL; in kvmppc_mmu_book3s_64_hv_xlate()
361 slb_v = slbe->origv; in kvmppc_mmu_book3s_64_hv_xlate()
364 slb_v = vcpu->kvm->arch.vrma_slb_v; in kvmppc_mmu_book3s_64_hv_xlate()
369 index = kvmppc_hv_find_lock_hpte(kvm, eaddr, slb_v, in kvmppc_mmu_book3s_64_hv_xlate()
373 return -ENOENT; in kvmppc_mmu_book3s_64_hv_xlate()
375 hptep = (__be64 *)(kvm->arch.hpt.virt + (index << 4)); in kvmppc_mmu_book3s_64_hv_xlate()
379 gr = kvm->arch.hpt.rev[index].guest_rpte; in kvmppc_mmu_book3s_64_hv_xlate()
384 gpte->eaddr = eaddr; in kvmppc_mmu_book3s_64_hv_xlate()
385 gpte->vpage = ((v & HPTE_V_AVPN) << 4) | ((eaddr >> 12) & 0xfff); in kvmppc_mmu_book3s_64_hv_xlate()
393 gpte->may_read = hpte_read_permission(pp, key); in kvmppc_mmu_book3s_64_hv_xlate()
394 gpte->may_write = hpte_write_permission(pp, key); in kvmppc_mmu_book3s_64_hv_xlate()
395 gpte->may_execute = gpte->may_read && !(gr & (HPTE_R_N | HPTE_R_G)); in kvmppc_mmu_book3s_64_hv_xlate()
399 int amrfield = hpte_get_skey_perm(gr, vcpu->arch.amr); in kvmppc_mmu_book3s_64_hv_xlate()
401 gpte->may_read = 0; in kvmppc_mmu_book3s_64_hv_xlate()
403 gpte->may_write = 0; in kvmppc_mmu_book3s_64_hv_xlate()
407 gpte->raddr = kvmppc_mmu_get_real_addr(v, gr, eaddr); in kvmppc_mmu_book3s_64_hv_xlate()
440 * Fast path - check if the guest physical address corresponds to a in kvmppc_hv_emulate_mmio()
447 idx = srcu_read_lock(&vcpu->kvm->srcu); in kvmppc_hv_emulate_mmio()
450 srcu_read_unlock(&vcpu->kvm->srcu, idx); in kvmppc_hv_emulate_mmio()
501 vcpu->arch.paddr_accessed = gpa; in kvmppc_hv_emulate_mmio()
502 vcpu->arch.vaddr_accessed = ea; in kvmppc_hv_emulate_mmio()
509 struct kvm *kvm = vcpu->kvm; in kvmppc_book3s_hv_page_fault() local
528 if (kvm_is_radix(kvm)) in kvmppc_book3s_hv_page_fault()
532 * Real-mode code has already searched the HPT and found the in kvmppc_book3s_hv_page_fault()
534 * it hasn't changed. If it has, just return and re-execute the in kvmppc_book3s_hv_page_fault()
537 if (ea != vcpu->arch.pgfault_addr) in kvmppc_book3s_hv_page_fault()
540 if (vcpu->arch.pgfault_cache) { in kvmppc_book3s_hv_page_fault()
541 mmio_update = atomic64_read(&kvm->arch.mmio_update); in kvmppc_book3s_hv_page_fault()
542 if (mmio_update == vcpu->arch.pgfault_cache->mmio_update) { in kvmppc_book3s_hv_page_fault()
543 r = vcpu->arch.pgfault_cache->rpte; in kvmppc_book3s_hv_page_fault()
544 psize = kvmppc_actual_pgsz(vcpu->arch.pgfault_hpte[0], in kvmppc_book3s_hv_page_fault()
546 gpa_base = r & HPTE_R_RPN & ~(psize - 1); in kvmppc_book3s_hv_page_fault()
548 gpa = gpa_base | (ea & (psize - 1)); in kvmppc_book3s_hv_page_fault()
553 index = vcpu->arch.pgfault_index; in kvmppc_book3s_hv_page_fault()
554 hptep = (__be64 *)(kvm->arch.hpt.virt + (index << 4)); in kvmppc_book3s_hv_page_fault()
555 rev = &kvm->arch.hpt.rev[index]; in kvmppc_book3s_hv_page_fault()
561 hpte[2] = r = rev->guest_rpte; in kvmppc_book3s_hv_page_fault()
569 if (hpte[0] != vcpu->arch.pgfault_hpte[0] || in kvmppc_book3s_hv_page_fault()
570 hpte[1] != vcpu->arch.pgfault_hpte[1]) in kvmppc_book3s_hv_page_fault()
575 gpa_base = r & HPTE_R_RPN & ~(psize - 1); in kvmppc_book3s_hv_page_fault()
577 gpa = gpa_base | (ea & (psize - 1)); in kvmppc_book3s_hv_page_fault()
579 memslot = gfn_to_memslot(kvm, gfn); in kvmppc_book3s_hv_page_fault()
584 if (!memslot || (memslot->flags & KVM_MEMSLOT_INVALID)) in kvmppc_book3s_hv_page_fault()
592 if (gfn_base < memslot->base_gfn) in kvmppc_book3s_hv_page_fault()
593 return -EFAULT; in kvmppc_book3s_hv_page_fault()
596 mmu_seq = kvm->mmu_invalidate_seq; in kvmppc_book3s_hv_page_fault()
599 ret = -EFAULT; in kvmppc_book3s_hv_page_fault()
615 /* Call KVM generic code to do the slow-path check */ in kvmppc_book3s_hv_page_fault()
619 return -EFAULT; in kvmppc_book3s_hv_page_fault()
632 spin_lock(&kvm->mmu_lock); in kvmppc_book3s_hv_page_fault()
633 ptep = find_kvm_host_pte(kvm, mmu_seq, hva, &shift); in kvmppc_book3s_hv_page_fault()
637 spin_unlock(&kvm->mmu_lock); in kvmppc_book3s_hv_page_fault()
656 hpa |= hva & (pte_size - psize); in kvmppc_book3s_hv_page_fault()
672 * don't mask out lower-order bits if psize < PAGE_SIZE. in kvmppc_book3s_hv_page_fault()
676 r = (r & HPTE_R_KEY_HI) | (r & ~(HPTE_R_PP0 - psize)) | hpa; in kvmppc_book3s_hv_page_fault()
696 if (!kvm->arch.mmu_ready) in kvmppc_book3s_hv_page_fault()
700 rev->guest_rpte != hpte[2]) in kvmppc_book3s_hv_page_fault()
706 rmap = &memslot->arch.rmap[gfn_base - memslot->base_gfn]; in kvmppc_book3s_hv_page_fault()
711 if (mmu_invalidate_retry(vcpu->kvm, mmu_seq)) { in kvmppc_book3s_hv_page_fault()
724 kvmppc_invalidate_hpte(kvm, hptep, index); in kvmppc_book3s_hv_page_fault()
728 kvmppc_add_revmap_chain(kvm, rev, rmap, index, 0); in kvmppc_book3s_hv_page_fault()
756 void kvmppc_rmap_reset(struct kvm *kvm) in kvmppc_rmap_reset() argument
762 srcu_idx = srcu_read_lock(&kvm->srcu); in kvmppc_rmap_reset()
763 slots = kvm_memslots(kvm); in kvmppc_rmap_reset()
766 spin_lock(&kvm->mmu_lock); in kvmppc_rmap_reset()
771 memset(memslot->arch.rmap, 0, in kvmppc_rmap_reset()
772 memslot->npages * sizeof(*memslot->arch.rmap)); in kvmppc_rmap_reset()
773 spin_unlock(&kvm->mmu_lock); in kvmppc_rmap_reset()
775 srcu_read_unlock(&kvm->srcu, srcu_idx); in kvmppc_rmap_reset()
779 static void kvmppc_unmap_hpte(struct kvm *kvm, unsigned long i, in kvmppc_unmap_hpte() argument
783 __be64 *hptep = (__be64 *) (kvm->arch.hpt.virt + (i << 4)); in kvmppc_unmap_hpte()
784 struct revmap_entry *rev = kvm->arch.hpt.rev; in kvmppc_unmap_hpte()
807 kvmppc_invalidate_hpte(kvm, hptep, i); in kvmppc_unmap_hpte()
812 if ((rcbits & HPTE_R_C) && memslot->dirty_bitmap) in kvmppc_unmap_hpte()
816 note_hpte_modification(kvm, &rev[i]); in kvmppc_unmap_hpte()
821 static void kvm_unmap_rmapp(struct kvm *kvm, struct kvm_memory_slot *memslot, in kvm_unmap_rmapp() argument
828 rmapp = &memslot->arch.rmap[gfn - memslot->base_gfn]; in kvm_unmap_rmapp()
842 hptep = (__be64 *) (kvm->arch.hpt.virt + (i << 4)); in kvm_unmap_rmapp()
851 kvmppc_unmap_hpte(kvm, i, memslot, rmapp, gfn); in kvm_unmap_rmapp()
857 bool kvm_unmap_gfn_range_hv(struct kvm *kvm, struct kvm_gfn_range *range) in kvm_unmap_gfn_range_hv() argument
861 if (kvm_is_radix(kvm)) { in kvm_unmap_gfn_range_hv()
862 for (gfn = range->start; gfn < range->end; gfn++) in kvm_unmap_gfn_range_hv()
863 kvm_unmap_radix(kvm, range->slot, gfn); in kvm_unmap_gfn_range_hv()
865 for (gfn = range->start; gfn < range->end; gfn++) in kvm_unmap_gfn_range_hv()
866 kvm_unmap_rmapp(kvm, range->slot, gfn); in kvm_unmap_gfn_range_hv()
872 void kvmppc_core_flush_memslot_hv(struct kvm *kvm, in kvmppc_core_flush_memslot_hv() argument
879 gfn = memslot->base_gfn; in kvmppc_core_flush_memslot_hv()
880 rmapp = memslot->arch.rmap; in kvmppc_core_flush_memslot_hv()
881 if (kvm_is_radix(kvm)) { in kvmppc_core_flush_memslot_hv()
882 kvmppc_radix_flush_memslot(kvm, memslot); in kvmppc_core_flush_memslot_hv()
886 for (n = memslot->npages; n; --n, ++gfn) { in kvmppc_core_flush_memslot_hv()
894 kvm_unmap_rmapp(kvm, memslot, gfn); in kvmppc_core_flush_memslot_hv()
899 static bool kvm_age_rmapp(struct kvm *kvm, struct kvm_memory_slot *memslot, in kvm_age_rmapp() argument
902 struct revmap_entry *rev = kvm->arch.hpt.rev; in kvm_age_rmapp()
908 rmapp = &memslot->arch.rmap[gfn - memslot->base_gfn]; in kvm_age_rmapp()
922 hptep = (__be64 *) (kvm->arch.hpt.virt + (i << 4)); in kvm_age_rmapp()
940 kvmppc_clear_ref_hpte(kvm, hptep, i); in kvm_age_rmapp()
943 note_hpte_modification(kvm, &rev[i]); in kvm_age_rmapp()
954 bool kvm_age_gfn_hv(struct kvm *kvm, struct kvm_gfn_range *range) in kvm_age_gfn_hv() argument
959 if (kvm_is_radix(kvm)) { in kvm_age_gfn_hv()
960 for (gfn = range->start; gfn < range->end; gfn++) in kvm_age_gfn_hv()
961 ret |= kvm_age_radix(kvm, range->slot, gfn); in kvm_age_gfn_hv()
963 for (gfn = range->start; gfn < range->end; gfn++) in kvm_age_gfn_hv()
964 ret |= kvm_age_rmapp(kvm, range->slot, gfn); in kvm_age_gfn_hv()
970 static bool kvm_test_age_rmapp(struct kvm *kvm, struct kvm_memory_slot *memslot, in kvm_test_age_rmapp() argument
973 struct revmap_entry *rev = kvm->arch.hpt.rev; in kvm_test_age_rmapp()
979 rmapp = &memslot->arch.rmap[gfn - memslot->base_gfn]; in kvm_test_age_rmapp()
990 hp = (unsigned long *)(kvm->arch.hpt.virt + (i << 4)); in kvm_test_age_rmapp()
1003 bool kvm_test_age_gfn_hv(struct kvm *kvm, struct kvm_gfn_range *range) in kvm_test_age_gfn_hv() argument
1005 WARN_ON(range->start + 1 != range->end); in kvm_test_age_gfn_hv()
1007 if (kvm_is_radix(kvm)) in kvm_test_age_gfn_hv()
1008 return kvm_test_age_radix(kvm, range->slot, range->start); in kvm_test_age_gfn_hv()
1010 return kvm_test_age_rmapp(kvm, range->slot, range->start); in kvm_test_age_gfn_hv()
1013 bool kvm_set_spte_gfn_hv(struct kvm *kvm, struct kvm_gfn_range *range) in kvm_set_spte_gfn_hv() argument
1015 WARN_ON(range->start + 1 != range->end); in kvm_set_spte_gfn_hv()
1017 if (kvm_is_radix(kvm)) in kvm_set_spte_gfn_hv()
1018 kvm_unmap_radix(kvm, range->slot, range->start); in kvm_set_spte_gfn_hv()
1020 kvm_unmap_rmapp(kvm, range->slot, range->start); in kvm_set_spte_gfn_hv()
1025 static int vcpus_running(struct kvm *kvm) in vcpus_running() argument
1027 return atomic_read(&kvm->arch.vcpus_running) != 0; in vcpus_running()
1032 * This can be more than 1 if we find a huge-page HPTE.
1034 static int kvm_test_clear_dirty_npages(struct kvm *kvm, unsigned long *rmapp) in kvm_test_clear_dirty_npages() argument
1036 struct revmap_entry *rev = kvm->arch.hpt.rev; in kvm_test_clear_dirty_npages()
1053 hptep = (__be64 *) (kvm->arch.hpt.virt + (i << 4)); in kvm_test_clear_dirty_npages()
1062 * when making the HPTE read-only. in kvm_test_clear_dirty_npages()
1072 (!hpte_is_writable(hptep1) || vcpus_running(kvm))) in kvm_test_clear_dirty_npages()
1091 kvmppc_invalidate_hpte(kvm, hptep, i); in kvm_test_clear_dirty_npages()
1098 note_hpte_modification(kvm, &rev[i]); in kvm_test_clear_dirty_npages()
1101 n = (n + PAGE_SIZE - 1) >> PAGE_SHIFT; in kvm_test_clear_dirty_npages()
1121 if (!vpa->dirty || !vpa->pinned_addr) in kvmppc_harvest_vpa_dirty()
1123 gfn = vpa->gpa >> PAGE_SHIFT; in kvmppc_harvest_vpa_dirty()
1124 if (gfn < memslot->base_gfn || in kvmppc_harvest_vpa_dirty()
1125 gfn >= memslot->base_gfn + memslot->npages) in kvmppc_harvest_vpa_dirty()
1128 vpa->dirty = false; in kvmppc_harvest_vpa_dirty()
1130 __set_bit_le(gfn - memslot->base_gfn, map); in kvmppc_harvest_vpa_dirty()
1133 long kvmppc_hv_get_dirty_log_hpt(struct kvm *kvm, in kvmppc_hv_get_dirty_log_hpt() argument
1140 rmapp = memslot->arch.rmap; in kvmppc_hv_get_dirty_log_hpt()
1141 for (i = 0; i < memslot->npages; ++i) { in kvmppc_hv_get_dirty_log_hpt()
1142 int npages = kvm_test_clear_dirty_npages(kvm, rmapp); in kvmppc_hv_get_dirty_log_hpt()
1145 * since we always put huge-page HPTEs in the rmap chain in kvmppc_hv_get_dirty_log_hpt()
1156 void *kvmppc_pin_guest_page(struct kvm *kvm, unsigned long gpa, in kvmppc_pin_guest_page() argument
1166 srcu_idx = srcu_read_lock(&kvm->srcu); in kvmppc_pin_guest_page()
1167 memslot = gfn_to_memslot(kvm, gfn); in kvmppc_pin_guest_page()
1168 if (!memslot || (memslot->flags & KVM_MEMSLOT_INVALID)) in kvmppc_pin_guest_page()
1175 srcu_read_unlock(&kvm->srcu, srcu_idx); in kvmppc_pin_guest_page()
1177 offset = gpa & (PAGE_SIZE - 1); in kvmppc_pin_guest_page()
1179 *nb_ret = PAGE_SIZE - offset; in kvmppc_pin_guest_page()
1183 srcu_read_unlock(&kvm->srcu, srcu_idx); in kvmppc_pin_guest_page()
1187 void kvmppc_unpin_guest_page(struct kvm *kvm, void *va, unsigned long gpa, in kvmppc_unpin_guest_page() argument
1202 srcu_idx = srcu_read_lock(&kvm->srcu); in kvmppc_unpin_guest_page()
1203 memslot = gfn_to_memslot(kvm, gfn); in kvmppc_unpin_guest_page()
1204 if (memslot && memslot->dirty_bitmap) in kvmppc_unpin_guest_page()
1205 set_bit_le(gfn - memslot->base_gfn, memslot->dirty_bitmap); in kvmppc_unpin_guest_page()
1206 srcu_read_unlock(&kvm->srcu, srcu_idx); in kvmppc_unpin_guest_page()
1216 rc = kvmppc_allocate_hpt(&resize->hpt, resize->order); in resize_hpt_allocate()
1221 resize->hpt.virt); in resize_hpt_allocate()
1229 struct kvm *kvm = resize->kvm; in resize_hpt_rehash_hpte() local
1230 struct kvm_hpt_info *old = &kvm->arch.hpt; in resize_hpt_rehash_hpte()
1231 struct kvm_hpt_info *new = &resize->hpt; in resize_hpt_rehash_hpte()
1232 unsigned long old_hash_mask = (1ULL << (old->order - 7)) - 1; in resize_hpt_rehash_hpte()
1233 unsigned long new_hash_mask = (1ULL << (new->order - 7)) - 1; in resize_hpt_rehash_hpte()
1242 hptep = (__be64 *)(old->virt + (idx << 4)); in resize_hpt_rehash_hpte()
1267 rev = &old->rev[idx]; in resize_hpt_rehash_hpte()
1268 guest_rpte = rev->guest_rpte; in resize_hpt_rehash_hpte()
1270 ret = -EIO; in resize_hpt_rehash_hpte()
1277 int srcu_idx = srcu_read_lock(&kvm->srcu); in resize_hpt_rehash_hpte()
1279 __gfn_to_memslot(kvm_memslots(kvm), gfn); in resize_hpt_rehash_hpte()
1283 rmapp = &memslot->arch.rmap[gfn - memslot->base_gfn]; in resize_hpt_rehash_hpte()
1286 kvmppc_unmap_hpte(kvm, idx, memslot, rmapp, gfn); in resize_hpt_rehash_hpte()
1290 srcu_read_unlock(&kvm->srcu, srcu_idx); in resize_hpt_rehash_hpte()
1310 avpn = HPTE_V_AVPN_VAL(vpte) & ~(((1ul << pshift) - 1) >> 23); in resize_hpt_rehash_hpte()
1318 /* We only have 28 - 23 bits of offset in avpn */ in resize_hpt_rehash_hpte()
1329 /* We only have 40 - 23 bits of seg_off in avpn */ in resize_hpt_rehash_hpte()
1343 new_hptep = (__be64 *)(new->virt + (new_idx << 4)); in resize_hpt_rehash_hpte()
1352 BUG_ON(new->order >= old->order); in resize_hpt_rehash_hpte()
1357 ret = -ENOSPC; in resize_hpt_rehash_hpte()
1371 new->rev[new_idx].guest_rpte = guest_rpte; in resize_hpt_rehash_hpte()
1383 struct kvm *kvm = resize->kvm; in resize_hpt_rehash() local
1387 for (i = 0; i < kvmppc_hpt_npte(&kvm->arch.hpt); i++) { in resize_hpt_rehash()
1398 struct kvm *kvm = resize->kvm; in resize_hpt_pivot() local
1406 spin_lock(&kvm->mmu_lock); in resize_hpt_pivot()
1409 hpt_tmp = kvm->arch.hpt; in resize_hpt_pivot()
1410 kvmppc_set_hpt(kvm, &resize->hpt); in resize_hpt_pivot()
1411 resize->hpt = hpt_tmp; in resize_hpt_pivot()
1413 spin_unlock(&kvm->mmu_lock); in resize_hpt_pivot()
1415 synchronize_srcu_expedited(&kvm->srcu); in resize_hpt_pivot()
1418 kvmppc_setup_partition_table(kvm); in resize_hpt_pivot()
1423 static void resize_hpt_release(struct kvm *kvm, struct kvm_resize_hpt *resize) in resize_hpt_release() argument
1425 if (WARN_ON(!mutex_is_locked(&kvm->arch.mmu_setup_lock))) in resize_hpt_release()
1431 if (resize->error != -EBUSY) { in resize_hpt_release()
1432 if (resize->hpt.virt) in resize_hpt_release()
1433 kvmppc_free_hpt(&resize->hpt); in resize_hpt_release()
1437 if (kvm->arch.resize_hpt == resize) in resize_hpt_release()
1438 kvm->arch.resize_hpt = NULL; in resize_hpt_release()
1446 struct kvm *kvm = resize->kvm; in resize_hpt_prepare_work() local
1449 if (WARN_ON(resize->error != -EBUSY)) in resize_hpt_prepare_work()
1452 mutex_lock(&kvm->arch.mmu_setup_lock); in resize_hpt_prepare_work()
1455 if (kvm->arch.resize_hpt == resize) { in resize_hpt_prepare_work()
1457 * do not sleep with kvm->arch.mmu_setup_lock held for a while. in resize_hpt_prepare_work()
1459 mutex_unlock(&kvm->arch.mmu_setup_lock); in resize_hpt_prepare_work()
1462 resize->order); in resize_hpt_prepare_work()
1466 /* We have strict assumption about -EBUSY in resize_hpt_prepare_work()
1469 if (WARN_ON(err == -EBUSY)) in resize_hpt_prepare_work()
1470 err = -EINPROGRESS; in resize_hpt_prepare_work()
1472 mutex_lock(&kvm->arch.mmu_setup_lock); in resize_hpt_prepare_work()
1473 /* It is possible that kvm->arch.resize_hpt != resize in resize_hpt_prepare_work()
1474 * after we grab kvm->arch.mmu_setup_lock again. in resize_hpt_prepare_work()
1478 resize->error = err; in resize_hpt_prepare_work()
1480 if (kvm->arch.resize_hpt != resize) in resize_hpt_prepare_work()
1481 resize_hpt_release(kvm, resize); in resize_hpt_prepare_work()
1483 mutex_unlock(&kvm->arch.mmu_setup_lock); in resize_hpt_prepare_work()
1486 int kvm_vm_ioctl_resize_hpt_prepare(struct kvm *kvm, in kvm_vm_ioctl_resize_hpt_prepare() argument
1489 unsigned long flags = rhpt->flags; in kvm_vm_ioctl_resize_hpt_prepare()
1490 unsigned long shift = rhpt->shift; in kvm_vm_ioctl_resize_hpt_prepare()
1494 if (flags != 0 || kvm_is_radix(kvm)) in kvm_vm_ioctl_resize_hpt_prepare()
1495 return -EINVAL; in kvm_vm_ioctl_resize_hpt_prepare()
1498 return -EINVAL; in kvm_vm_ioctl_resize_hpt_prepare()
1500 mutex_lock(&kvm->arch.mmu_setup_lock); in kvm_vm_ioctl_resize_hpt_prepare()
1502 resize = kvm->arch.resize_hpt; in kvm_vm_ioctl_resize_hpt_prepare()
1505 if (resize->order == shift) { in kvm_vm_ioctl_resize_hpt_prepare()
1507 ret = resize->error; in kvm_vm_ioctl_resize_hpt_prepare()
1508 if (ret == -EBUSY) in kvm_vm_ioctl_resize_hpt_prepare()
1511 resize_hpt_release(kvm, resize); in kvm_vm_ioctl_resize_hpt_prepare()
1517 resize_hpt_release(kvm, resize); in kvm_vm_ioctl_resize_hpt_prepare()
1528 ret = -ENOMEM; in kvm_vm_ioctl_resize_hpt_prepare()
1532 resize->error = -EBUSY; in kvm_vm_ioctl_resize_hpt_prepare()
1533 resize->order = shift; in kvm_vm_ioctl_resize_hpt_prepare()
1534 resize->kvm = kvm; in kvm_vm_ioctl_resize_hpt_prepare()
1535 INIT_WORK(&resize->work, resize_hpt_prepare_work); in kvm_vm_ioctl_resize_hpt_prepare()
1536 kvm->arch.resize_hpt = resize; in kvm_vm_ioctl_resize_hpt_prepare()
1538 schedule_work(&resize->work); in kvm_vm_ioctl_resize_hpt_prepare()
1543 mutex_unlock(&kvm->arch.mmu_setup_lock); in kvm_vm_ioctl_resize_hpt_prepare()
1549 /* Nothing to do, just force a KVM exit */ in resize_hpt_boot_vcpu()
1552 int kvm_vm_ioctl_resize_hpt_commit(struct kvm *kvm, in kvm_vm_ioctl_resize_hpt_commit() argument
1555 unsigned long flags = rhpt->flags; in kvm_vm_ioctl_resize_hpt_commit()
1556 unsigned long shift = rhpt->shift; in kvm_vm_ioctl_resize_hpt_commit()
1560 if (flags != 0 || kvm_is_radix(kvm)) in kvm_vm_ioctl_resize_hpt_commit()
1561 return -EINVAL; in kvm_vm_ioctl_resize_hpt_commit()
1564 return -EINVAL; in kvm_vm_ioctl_resize_hpt_commit()
1566 mutex_lock(&kvm->arch.mmu_setup_lock); in kvm_vm_ioctl_resize_hpt_commit()
1568 resize = kvm->arch.resize_hpt; in kvm_vm_ioctl_resize_hpt_commit()
1571 ret = -EIO; in kvm_vm_ioctl_resize_hpt_commit()
1572 if (WARN_ON(!kvm->arch.mmu_ready)) in kvm_vm_ioctl_resize_hpt_commit()
1576 kvm->arch.mmu_ready = 0; in kvm_vm_ioctl_resize_hpt_commit()
1579 /* Boot all CPUs out of the guest so they re-read in kvm_vm_ioctl_resize_hpt_commit()
1583 ret = -ENXIO; in kvm_vm_ioctl_resize_hpt_commit()
1584 if (!resize || (resize->order != shift)) in kvm_vm_ioctl_resize_hpt_commit()
1587 ret = resize->error; in kvm_vm_ioctl_resize_hpt_commit()
1599 kvm->arch.mmu_ready = 1; in kvm_vm_ioctl_resize_hpt_commit()
1602 resize_hpt_release(kvm, resize); in kvm_vm_ioctl_resize_hpt_commit()
1603 mutex_unlock(&kvm->arch.mmu_setup_lock); in kvm_vm_ioctl_resize_hpt_commit()
1617 * exact flag set was done. When the invalid count is non-zero
1626 struct kvm *kvm; member
1640 if (revp->guest_rpte & HPTE_GR_MODIFIED) in hpte_dirty()
1644 rcbits_unset = ~revp->guest_rpte & (HPTE_R_R | HPTE_R_C); in hpte_dirty()
1689 /* re-evaluate valid and dirty from synchronized HPTE value */ in record_hpte()
1691 dirty = !!(revp->guest_rpte & HPTE_GR_MODIFIED); in record_hpte()
1694 rcbits_unset = ~revp->guest_rpte & (HPTE_R_R | HPTE_R_C); in record_hpte()
1696 revp->guest_rpte |= (hr & in record_hpte()
1709 r = revp->guest_rpte; in record_hpte()
1713 revp->guest_rpte = r; in record_hpte()
1728 struct kvm_htab_ctx *ctx = file->private_data; in kvm_htab_read()
1729 struct kvm *kvm = ctx->kvm; in kvm_htab_read() local
1741 return -EFAULT; in kvm_htab_read()
1742 if (kvm_is_radix(kvm)) in kvm_htab_read()
1745 first_pass = ctx->first_pass; in kvm_htab_read()
1746 flags = ctx->flags; in kvm_htab_read()
1748 i = ctx->index; in kvm_htab_read()
1749 hptp = (__be64 *)(kvm->arch.hpt.virt + (i * HPTE_SIZE)); in kvm_htab_read()
1750 revp = kvm->arch.hpt.rev + i; in kvm_htab_read()
1763 /* Skip uninteresting entries, i.e. clean on not-first pass */ in kvm_htab_read()
1765 while (i < kvmppc_hpt_npte(&kvm->arch.hpt) && in kvm_htab_read()
1775 while (i < kvmppc_hpt_npte(&kvm->arch.hpt) && in kvm_htab_read()
1783 return -EFAULT; in kvm_htab_read()
1791 while (i < kvmppc_hpt_npte(&kvm->arch.hpt) && in kvm_htab_read()
1804 return -EFAULT; in kvm_htab_read()
1812 if (i >= kvmppc_hpt_npte(&kvm->arch.hpt)) { in kvm_htab_read()
1814 ctx->first_pass = 0; in kvm_htab_read()
1819 ctx->index = i; in kvm_htab_read()
1827 struct kvm_htab_ctx *ctx = file->private_data; in kvm_htab_write()
1828 struct kvm *kvm = ctx->kvm; in kvm_htab_write() local
1841 return -EFAULT; in kvm_htab_write()
1842 if (kvm_is_radix(kvm)) in kvm_htab_write()
1843 return -EINVAL; in kvm_htab_write()
1846 mutex_lock(&kvm->arch.mmu_setup_lock); in kvm_htab_write()
1847 mmu_ready = kvm->arch.mmu_ready; in kvm_htab_write()
1849 kvm->arch.mmu_ready = 0; /* temporarily */ in kvm_htab_write()
1852 if (atomic_read(&kvm->arch.vcpus_running)) { in kvm_htab_write()
1853 kvm->arch.mmu_ready = 1; in kvm_htab_write()
1854 mutex_unlock(&kvm->arch.mmu_setup_lock); in kvm_htab_write()
1855 return -EBUSY; in kvm_htab_write()
1861 err = -EFAULT; in kvm_htab_write()
1872 err = -EINVAL; in kvm_htab_write()
1874 if (i >= kvmppc_hpt_npte(&kvm->arch.hpt) || in kvm_htab_write()
1875 i + hdr.n_valid + hdr.n_invalid > kvmppc_hpt_npte(&kvm->arch.hpt)) in kvm_htab_write()
1878 hptp = (__be64 *)(kvm->arch.hpt.virt + (i * HPTE_SIZE)); in kvm_htab_write()
1884 err = -EFAULT; in kvm_htab_write()
1890 err = -EINVAL; in kvm_htab_write()
1900 kvmppc_do_h_remove(kvm, 0, i, 0, tmp); in kvm_htab_write()
1901 err = -EIO; in kvm_htab_write()
1902 ret = kvmppc_virtmode_do_h_enter(kvm, H_EXACT, i, v, r, in kvm_htab_write()
1912 kvm->arch.vrma_slb_v = senc | SLB_VSID_B_1T | in kvm_htab_write()
1915 lpcr = senc << (LPCR_VRMASD_SH - 4); in kvm_htab_write()
1916 kvmppc_update_lpcr(kvm, lpcr, in kvm_htab_write()
1919 kvmppc_setup_partition_table(kvm); in kvm_htab_write()
1929 kvmppc_do_h_remove(kvm, 0, i, 0, tmp); in kvm_htab_write()
1939 kvm->arch.mmu_ready = mmu_ready; in kvm_htab_write()
1940 mutex_unlock(&kvm->arch.mmu_setup_lock); in kvm_htab_write()
1949 struct kvm_htab_ctx *ctx = filp->private_data; in kvm_htab_release()
1951 filp->private_data = NULL; in kvm_htab_release()
1952 if (!(ctx->flags & KVM_GET_HTAB_WRITE)) in kvm_htab_release()
1953 atomic_dec(&ctx->kvm->arch.hpte_mod_interest); in kvm_htab_release()
1954 kvm_put_kvm(ctx->kvm); in kvm_htab_release()
1966 int kvm_vm_ioctl_get_htab_fd(struct kvm *kvm, struct kvm_get_htab_fd *ghf) in kvm_vm_ioctl_get_htab_fd() argument
1973 if (ghf->flags & ~(KVM_GET_HTAB_BOLTED_ONLY | KVM_GET_HTAB_WRITE)) in kvm_vm_ioctl_get_htab_fd()
1974 return -EINVAL; in kvm_vm_ioctl_get_htab_fd()
1977 return -ENOMEM; in kvm_vm_ioctl_get_htab_fd()
1978 kvm_get_kvm(kvm); in kvm_vm_ioctl_get_htab_fd()
1979 ctx->kvm = kvm; in kvm_vm_ioctl_get_htab_fd()
1980 ctx->index = ghf->start_index; in kvm_vm_ioctl_get_htab_fd()
1981 ctx->flags = ghf->flags; in kvm_vm_ioctl_get_htab_fd()
1982 ctx->first_pass = 1; in kvm_vm_ioctl_get_htab_fd()
1984 rwflag = (ghf->flags & KVM_GET_HTAB_WRITE) ? O_WRONLY : O_RDONLY; in kvm_vm_ioctl_get_htab_fd()
1985 ret = anon_inode_getfd("kvm-htab", &kvm_htab_fops, ctx, rwflag | O_CLOEXEC); in kvm_vm_ioctl_get_htab_fd()
1988 kvm_put_kvm_no_destroy(kvm); in kvm_vm_ioctl_get_htab_fd()
1993 mutex_lock(&kvm->slots_lock); in kvm_vm_ioctl_get_htab_fd()
1994 atomic_inc(&kvm->arch.hpte_mod_interest); in kvm_vm_ioctl_get_htab_fd()
1996 synchronize_srcu_expedited(&kvm->srcu); in kvm_vm_ioctl_get_htab_fd()
1997 mutex_unlock(&kvm->slots_lock); in kvm_vm_ioctl_get_htab_fd()
2004 struct kvm *kvm; member
2014 struct kvm *kvm = inode->i_private; in debugfs_htab_open() local
2019 return -ENOMEM; in debugfs_htab_open()
2021 kvm_get_kvm(kvm); in debugfs_htab_open()
2022 p->kvm = kvm; in debugfs_htab_open()
2023 mutex_init(&p->mutex); in debugfs_htab_open()
2024 file->private_data = p; in debugfs_htab_open()
2031 struct debugfs_htab_state *p = file->private_data; in debugfs_htab_release()
2033 kvm_put_kvm(p->kvm); in debugfs_htab_release()
2041 struct debugfs_htab_state *p = file->private_data; in debugfs_htab_read()
2045 struct kvm *kvm; in debugfs_htab_read() local
2048 kvm = p->kvm; in debugfs_htab_read()
2049 if (kvm_is_radix(kvm)) in debugfs_htab_read()
2052 ret = mutex_lock_interruptible(&p->mutex); in debugfs_htab_read()
2056 if (p->chars_left) { in debugfs_htab_read()
2057 n = p->chars_left; in debugfs_htab_read()
2060 r = copy_to_user(buf, p->buf + p->buf_index, n); in debugfs_htab_read()
2061 n -= r; in debugfs_htab_read()
2062 p->chars_left -= n; in debugfs_htab_read()
2063 p->buf_index += n; in debugfs_htab_read()
2065 len -= n; in debugfs_htab_read()
2069 ret = -EFAULT; in debugfs_htab_read()
2074 i = p->hpt_index; in debugfs_htab_read()
2075 hptp = (__be64 *)(kvm->arch.hpt.virt + (i * HPTE_SIZE)); in debugfs_htab_read()
2076 for (; len != 0 && i < kvmppc_hpt_npte(&kvm->arch.hpt); in debugfs_htab_read()
2087 gr = kvm->arch.hpt.rev[i].guest_rpte; in debugfs_htab_read()
2094 n = scnprintf(p->buf, sizeof(p->buf), in debugfs_htab_read()
2097 p->chars_left = n; in debugfs_htab_read()
2100 r = copy_to_user(buf, p->buf, n); in debugfs_htab_read()
2101 n -= r; in debugfs_htab_read()
2102 p->chars_left -= n; in debugfs_htab_read()
2103 p->buf_index = n; in debugfs_htab_read()
2105 len -= n; in debugfs_htab_read()
2109 ret = -EFAULT; in debugfs_htab_read()
2113 p->hpt_index = i; in debugfs_htab_read()
2116 mutex_unlock(&p->mutex); in debugfs_htab_read()
2123 return -EACCES; in debugfs_htab_write()
2135 void kvmppc_mmu_debugfs_init(struct kvm *kvm) in kvmppc_mmu_debugfs_init() argument
2137 debugfs_create_file("htab", 0400, kvm->debugfs_dentry, kvm, in kvmppc_mmu_debugfs_init()
2143 struct kvmppc_mmu *mmu = &vcpu->arch.mmu; in kvmppc_mmu_book3s_hv_init()
2145 vcpu->arch.slb_nr = 32; /* POWER7/POWER8 */ in kvmppc_mmu_book3s_hv_init()
2147 mmu->xlate = kvmppc_mmu_book3s_64_hv_xlate; in kvmppc_mmu_book3s_hv_init()
2149 vcpu->arch.hflags |= BOOK3S_HFLAG_SLB; in kvmppc_mmu_book3s_hv_init()