Lines matching 'kvm' in arch/powerpc/kvm/book3s_64_mmu_radix.c
1 // SPDX-License-Identifier: GPL-2.0-only
9 #include <linux/kvm.h>
22 #include <asm/pte-walk.h>
43 /* Can't access quadrants 1 or 2 in non-HV mode, call the HV to do it */ in __kvmhv_copy_tofrom_guest_radix()
99 int lpid = vcpu->kvm->arch.lpid; in kvmhv_copy_tofrom_guest_radix()
100 int pid = vcpu->arch.pid; in kvmhv_copy_tofrom_guest_radix()
104 return -EINVAL; in kvmhv_copy_tofrom_guest_radix()
107 if (vcpu->arch.nested) in kvmhv_copy_tofrom_guest_radix()
108 lpid = vcpu->arch.nested->shadow_lpid; in kvmhv_copy_tofrom_guest_radix()
126 memset(to + (n - ret), 0, ret); in kvmhv_copy_from_guest_radix()
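The memset on line 126 implies the copy helper's convention: it returns the number of bytes it could not copy, and the caller zero-fills that uncopied tail. A minimal userspace sketch of the convention; copy_bytes() and its 'avail' limit are inventions of the sketch, not the kernel's kvmhv_copy_tofrom_guest_radix() signature:

#include <stddef.h>
#include <string.h>

static size_t copy_bytes(void *to, const void *from, size_t n, size_t avail)
{
	size_t done = n < avail ? n : avail;

	memcpy(to, from, done);
	return n - done;		/* bytes left uncopied */
}

static void copy_from_guest(void *to, const void *from, size_t n, size_t avail)
{
	size_t ret = copy_bytes(to, from, n, avail);

	if (ret > 0)			/* partial copy: zero the tail, cf. line 126 */
		memset((char *)to + (n - ret), 0, ret);
}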
141 struct kvm *kvm = vcpu->kvm; in kvmppc_mmu_walk_radix_tree() local
147 rts = ((root & RTS1_MASK) >> (RTS1_SHIFT - 3)) | in kvmppc_mmu_walk_radix_tree()
154 /* Current implementations only support 52-bit space */ in kvmppc_mmu_walk_radix_tree()
156 return -EINVAL; in kvmppc_mmu_walk_radix_tree()
159 for (level = 3; level >= 0; --level) { in kvmppc_mmu_walk_radix_tree()
163 return -EINVAL; in kvmppc_mmu_walk_radix_tree()
165 return -EINVAL; in kvmppc_mmu_walk_radix_tree()
166 offset -= bits; in kvmppc_mmu_walk_radix_tree()
167 index = (eaddr >> offset) & ((1UL << bits) - 1); in kvmppc_mmu_walk_radix_tree()
169 if (base & ((1UL << (bits + 3)) - 1)) in kvmppc_mmu_walk_radix_tree()
170 return -EINVAL; in kvmppc_mmu_walk_radix_tree()
175 ret = kvm_read_guest(kvm, addr, &rpte, sizeof(rpte)); in kvmppc_mmu_walk_radix_tree()
184 return -ENOENT; in kvmppc_mmu_walk_radix_tree()
195 return -EINVAL; in kvmppc_mmu_walk_radix_tree()
200 if (gpa & ((1ul << offset) - 1)) in kvmppc_mmu_walk_radix_tree()
201 return -EINVAL; in kvmppc_mmu_walk_radix_tree()
202 gpa |= eaddr & ((1ul << offset) - 1); in kvmppc_mmu_walk_radix_tree()
206 gpte->page_size = ps; in kvmppc_mmu_walk_radix_tree()
207 gpte->page_shift = offset; in kvmppc_mmu_walk_radix_tree()
209 gpte->eaddr = eaddr; in kvmppc_mmu_walk_radix_tree()
210 gpte->raddr = gpa; in kvmppc_mmu_walk_radix_tree()
213 gpte->may_read = !!(pte & _PAGE_READ); in kvmppc_mmu_walk_radix_tree()
214 gpte->may_write = !!(pte & _PAGE_WRITE); in kvmppc_mmu_walk_radix_tree()
215 gpte->may_execute = !!(pte & _PAGE_EXEC); in kvmppc_mmu_walk_radix_tree()
217 gpte->rc = pte & (_PAGE_ACCESSED | _PAGE_DIRTY); in kvmppc_mmu_walk_radix_tree()
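Lines 147-217 are the heart of the guest radix walk: the split RTS fields of the root give the total address-space size (rts + 31 bits, with only 52 supported), and each of the four levels consumes 'bits' index bits from the effective address, with alignment and validity checks at every step. A compilable userspace sketch of the same arithmetic; RPTE_VALID/RPTE_LEAF, the fixed 9-bit lower levels, and read_guest_u64() are simplifications of the sketch, not the kernel's definitions:

#include <stdint.h>
#include <errno.h>

#define RPTE_VALID	(1ULL << 63)
#define RPTE_LEAF	(1ULL << 62)		/* assumed leaf marker */

int read_guest_u64(uint64_t addr, uint64_t *val);	/* hypothetical accessor */

static int walk_radix(uint64_t base, unsigned int rts, unsigned int bits,
		      uint64_t eaddr, uint64_t *raddr)
{
	unsigned int offset = rts + 31;		/* total address-space bits */
	uint64_t pte = 0;
	int level;

	if (offset != 52)			/* only 52-bit space supported */
		return -EINVAL;

	for (level = 3; level >= 0; --level) {
		uint64_t index;

		offset -= bits;
		index = (eaddr >> offset) & ((1ULL << bits) - 1);
		if (read_guest_u64(base + index * sizeof(uint64_t), &pte))
			return -EFAULT;
		if (!(pte & RPTE_VALID))
			return -ENOENT;
		if (pte & RPTE_LEAF)
			break;
		if (level == 0)			/* level 0 must map a page */
			return -EINVAL;
		base = pte & ~0xfffULL;		/* next table; simplified mask */
		bits = 9;			/* lower levels: 9-bit indexes */
	}
	/* leaf found: page frame plus the untranslated low 'offset' bits */
	*raddr = (pte & ~(RPTE_VALID | RPTE_LEAF) & ~((1ULL << offset) - 1)) |
		 (eaddr & ((1ULL << offset) - 1));
	return 0;
}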
228 * table have the same layout, a partition-scoped page table and a
229 * process-scoped page table have the same layout, and the 2nd
237 struct kvm *kvm = vcpu->kvm; in kvmppc_mmu_radix_translate_table() local
243 return -EINVAL; in kvmppc_mmu_radix_translate_table()
248 return -EINVAL; in kvmppc_mmu_radix_translate_table()
253 ret = kvm_read_guest(kvm, ptbl, &entry, sizeof(entry)); in kvmppc_mmu_radix_translate_table()
274 pid = vcpu->arch.pid; in kvmppc_mmu_radix_xlate()
280 return -EINVAL; in kvmppc_mmu_radix_xlate()
284 vcpu->kvm->arch.process_table, pid, &pte); in kvmppc_mmu_radix_xlate()
291 gpte->may_read = 0; in kvmppc_mmu_radix_xlate()
292 gpte->may_write = 0; in kvmppc_mmu_radix_xlate()
293 gpte->may_execute = 0; in kvmppc_mmu_radix_xlate()
299 gpte->may_read = 0; in kvmppc_mmu_radix_xlate()
301 gpte->may_write = 0; in kvmppc_mmu_radix_xlate()
302 if (vcpu->arch.iamr & (1ul << 62)) in kvmppc_mmu_radix_xlate()
303 gpte->may_execute = 0; in kvmppc_mmu_radix_xlate()
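Lines 291-303 show how protection keys interact with a successful walk: key faults and the IAMR bit can only revoke permissions the PTE granted, never add any. A sketch of that post-processing; the struct layout and the key_denies_* flags are illustrative, not the kernel's kvmppc_pte fields or AMR decoding:

#include <stdbool.h>
#include <stdint.h>

struct xlate_pte {
	bool may_read, may_write, may_execute;
};

static void apply_key_masks(struct xlate_pte *gpte, bool key_denies_read,
			    bool key_denies_write, uint64_t iamr)
{
	if (key_denies_read)
		gpte->may_read = false;
	if (key_denies_write)
		gpte->may_write = false;
	if (iamr & (1ULL << 62))	/* execute veto, cf. line 302 */
		gpte->may_execute = false;
}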
310 void kvmppc_radix_tlbie_page(struct kvm *kvm, unsigned long addr, in kvmppc_radix_tlbie_page() argument
323 addr &= ~(psize - 1); in kvmppc_radix_tlbie_page()
345 pr_err("KVM: TLB page invalidation hcall failed, rc=%ld\n", rc); in kvmppc_radix_tlbie_page()
348 static void kvmppc_radix_flush_pwc(struct kvm *kvm, unsigned int lpid) in kvmppc_radix_flush_pwc() argument
364 0, -1UL); in kvmppc_radix_flush_pwc()
366 pr_err("KVM: TLB PWC invalidation hcall failed, rc=%ld\n", rc); in kvmppc_radix_flush_pwc()
369 static unsigned long kvmppc_radix_update_pte(struct kvm *kvm, pte_t *ptep, in kvmppc_radix_update_pte() argument
376 static void kvmppc_radix_set_pte_at(struct kvm *kvm, unsigned long addr, in kvmppc_radix_set_pte_at() argument
379 radix__set_pte_at(kvm->mm, addr, ptep, pte, 0); in kvmppc_radix_set_pte_at()
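kvmppc_radix_update_pte() (line 369) is an atomic read-modify-write on a PTE: clear the bits in 'clr', set the bits in 'set', and return the old value. A userspace model using C11 atomics in place of the kernel's pte primitives:

#include <stdatomic.h>
#include <stdint.h>

static uint64_t radix_update_pte(_Atomic uint64_t *ptep,
				 uint64_t clr, uint64_t set)
{
	uint64_t old = atomic_load(ptep);

	/* retry until no other updater raced with us */
	while (!atomic_compare_exchange_weak(ptep, &old, (old & ~clr) | set))
		;
	return old;			/* callers inspect old R/C bits, etc. */
}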
417 /* Called with kvm->mmu_lock held */
418 void kvmppc_unmap_pte(struct kvm *kvm, pte_t *pte, unsigned long gpa, in kvmppc_unmap_pte() argument
429 old = kvmppc_radix_update_pte(kvm, pte, ~0UL, 0, gpa, shift); in kvmppc_unmap_pte()
430 kvmppc_radix_tlbie_page(kvm, gpa, shift, lpid); in kvmppc_unmap_pte()
433 if (lpid != kvm->arch.lpid) in kvmppc_unmap_pte()
437 memslot = gfn_to_memslot(kvm, gfn); in kvmppc_unmap_pte()
444 kvm->stat.num_2M_pages--; in kvmppc_unmap_pte()
446 kvm->stat.num_1G_pages--; in kvmppc_unmap_pte()
449 gpa &= ~(page_size - 1); in kvmppc_unmap_pte()
451 kvmhv_remove_nest_rmap_range(kvm, memslot, gpa, hpa, page_size); in kvmppc_unmap_pte()
453 if ((old & _PAGE_DIRTY) && memslot->dirty_bitmap) in kvmppc_unmap_pte()
469 * (or 4kB) mappings (of sub-pages of the same 2MB page).
471 static void kvmppc_unmap_free_pte(struct kvm *kvm, pte_t *pte, bool full, in kvmppc_unmap_free_pte() argument
483 kvmppc_unmap_pte(kvm, p, in kvmppc_unmap_free_pte()
492 static void kvmppc_unmap_free_pmd(struct kvm *kvm, pmd_t *pmd, bool full, in kvmppc_unmap_free_pmd() argument
506 kvmppc_unmap_pte(kvm, (pte_t *)p, in kvmppc_unmap_free_pmd()
514 kvmppc_unmap_free_pte(kvm, pte, full, lpid); in kvmppc_unmap_free_pmd()
521 static void kvmppc_unmap_free_pud(struct kvm *kvm, pud_t *pud, in kvmppc_unmap_free_pud() argument
536 kvmppc_unmap_free_pmd(kvm, pmd, true, lpid); in kvmppc_unmap_free_pud()
540 pud_free(kvm->mm, pud); in kvmppc_unmap_free_pud()
543 void kvmppc_free_pgtable_radix(struct kvm *kvm, pgd_t *pgd, unsigned int lpid) in kvmppc_free_pgtable_radix() argument
554 kvmppc_unmap_free_pud(kvm, pud, lpid); in kvmppc_free_pgtable_radix()
559 void kvmppc_free_radix(struct kvm *kvm) in kvmppc_free_radix() argument
561 if (kvm->arch.pgtable) { in kvmppc_free_radix()
562 kvmppc_free_pgtable_radix(kvm, kvm->arch.pgtable, in kvmppc_free_radix()
563 kvm->arch.lpid); in kvmppc_free_radix()
564 pgd_free(kvm->mm, kvm->arch.pgtable); in kvmppc_free_radix()
565 kvm->arch.pgtable = NULL; in kvmppc_free_radix()
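kvmppc_free_pgtable_radix() and kvmppc_free_radix() (lines 543-565) tear the shadow tree down top-down, unmapping each level's entries before freeing the table page itself. The recursion shape, with plain arrays standing in for pgd/pud/pmd/pte pages and a low 'valid' bit invented for the sketch:

#include <stdint.h>
#include <stdlib.h>

#define ENTRIES	512
#define VALID	1ULL

/* free_table(pgd, 4) tears down pgd -> pud -> pmd -> pte */
static void free_table(uint64_t *table, int level)
{
	for (int i = 0; i < ENTRIES; i++) {
		if (!(table[i] & VALID))
			continue;
		if (level > 1)		/* entry points to a lower-level table */
			free_table((uint64_t *)(uintptr_t)(table[i] & ~VALID),
				   level - 1);
		/* level == 1 entries are leaf PTEs: nothing further to free */
	}
	free(table);			/* the kernel uses pte/pmd/pud/pgd_free */
}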
569 static void kvmppc_unmap_free_pmd_entry_table(struct kvm *kvm, pmd_t *pmd, in kvmppc_unmap_free_pmd_entry_table() argument
580 kvmppc_radix_flush_pwc(kvm, lpid); in kvmppc_unmap_free_pmd_entry_table()
582 kvmppc_unmap_free_pte(kvm, pte, false, lpid); in kvmppc_unmap_free_pmd_entry_table()
585 static void kvmppc_unmap_free_pud_entry_table(struct kvm *kvm, pud_t *pud, in kvmppc_unmap_free_pud_entry_table() argument
596 kvmppc_radix_flush_pwc(kvm, lpid); in kvmppc_unmap_free_pud_entry_table()
598 kvmppc_unmap_free_pmd(kvm, pmd, false, lpid); in kvmppc_unmap_free_pud_entry_table()
610 int kvmppc_create_pte(struct kvm *kvm, pgd_t *pgtable, pte_t pte, in kvmppc_create_pte() argument
622 /* Traverse the guest's 2nd-level tree, allocate new levels needed */ in kvmppc_create_pte()
630 new_pud = pud_alloc_one(kvm->mm, gpa); in kvmppc_create_pte()
642 spin_lock(&kvm->mmu_lock); in kvmppc_create_pte()
643 ret = -EAGAIN; in kvmppc_create_pte()
644 if (mmu_invalidate_retry(kvm, mmu_seq)) in kvmppc_create_pte()
648 ret = -ENOMEM; in kvmppc_create_pte()
652 p4d_populate(kvm->mm, p4d, new_pud); in kvmppc_create_pte()
668 kvmppc_radix_update_pte(kvm, (pte_t *)pud, in kvmppc_create_pte()
678 ret = -EAGAIN; in kvmppc_create_pte()
682 kvmppc_unmap_pte(kvm, (pte_t *)pud, hgpa, PUD_SHIFT, NULL, in kvmppc_create_pte()
692 kvmppc_unmap_free_pud_entry_table(kvm, pud, gpa, lpid); in kvmppc_create_pte()
694 kvmppc_radix_set_pte_at(kvm, gpa, (pte_t *)pud, pte); in kvmppc_create_pte()
696 kvmhv_insert_nest_rmap(kvm, rmapp, n_rmap); in kvmppc_create_pte()
703 pud_populate(kvm->mm, pud, new_pmd); in kvmppc_create_pte()
719 kvmppc_radix_update_pte(kvm, pmdp_ptep(pmd), in kvmppc_create_pte()
730 ret = -EAGAIN; in kvmppc_create_pte()
734 kvmppc_unmap_pte(kvm, pmdp_ptep(pmd), lgpa, PMD_SHIFT, NULL, in kvmppc_create_pte()
744 kvmppc_unmap_free_pmd_entry_table(kvm, pmd, gpa, lpid); in kvmppc_create_pte()
746 kvmppc_radix_set_pte_at(kvm, gpa, pmdp_ptep(pmd), pte); in kvmppc_create_pte()
748 kvmhv_insert_nest_rmap(kvm, rmapp, n_rmap); in kvmppc_create_pte()
755 pmd_populate(kvm->mm, pmd, new_ptep); in kvmppc_create_pte()
768 kvmppc_radix_update_pte(kvm, ptep, 0, pte_val(pte), gpa, 0); in kvmppc_create_pte()
772 kvmppc_radix_set_pte_at(kvm, gpa, ptep, pte); in kvmppc_create_pte()
774 kvmhv_insert_nest_rmap(kvm, rmapp, n_rmap); in kvmppc_create_pte()
778 spin_unlock(&kvm->mmu_lock); in kvmppc_create_pte()
780 pud_free(kvm->mm, new_pud); in kvmppc_create_pte()
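The -EAGAIN exit at lines 642-644 is KVM's invalidate-retry idiom: mmu_seq is snapshotted before the unlocked host-PTE lookup, and mmu_invalidate_retry() under kvm->mmu_lock rejects the insertion if any MMU invalidation ran in between, so the caller replays the fault. A simplified model (the real check also accounts for invalidations still in progress):

#include <errno.h>
#include <pthread.h>

struct mmu {
	pthread_mutex_t lock;		/* stands in for kvm->mmu_lock */
	unsigned long invalidate_seq;	/* stands in for mmu_invalidate_seq */
};

static int insert_mapping(struct mmu *mmu, unsigned long mmu_seq)
{
	int ret = 0;

	pthread_mutex_lock(&mmu->lock);
	if (mmu->invalidate_seq != mmu_seq) {
		ret = -EAGAIN;		/* stale translation: caller retries */
		goto out;
	}
	/* ... safe to install the PTE here, as kvmppc_create_pte() does ... */
out:
	pthread_mutex_unlock(&mmu->lock);
	return ret;
}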
788 bool kvmppc_hv_handle_set_rc(struct kvm *kvm, bool nested, bool writing, in kvmppc_hv_handle_set_rc() argument
796 * Need to set an R or C bit in the 2nd-level tables; in kvmppc_hv_handle_set_rc()
805 ptep = find_kvm_nested_guest_pte(kvm, lpid, gpa, &shift); in kvmppc_hv_handle_set_rc()
807 ptep = find_kvm_secondary_pte(kvm, gpa, &shift); in kvmppc_hv_handle_set_rc()
810 kvmppc_radix_update_pte(kvm, ptep, 0, pgflags, gpa, shift); in kvmppc_hv_handle_set_rc()
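kvmppc_hv_handle_set_rc() (lines 788-810) resolves faults that only need the referenced/changed bits: find the partition-scoped PTE (nested or L1) and OR in _PAGE_ACCESSED, plus _PAGE_DIRTY when writing. A sketch reusing the radix_update_pte() model above; find_pte(), the bit positions, and the omitted present/writable checks are simplifications of the sketch:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

#define PAGE_ACCESSED	(1ULL << 8)	/* illustrative bit positions */
#define PAGE_DIRTY	(1ULL << 7)

extern _Atomic uint64_t *find_pte(uint64_t gpa);	/* hypothetical lookup */
extern uint64_t radix_update_pte(_Atomic uint64_t *ptep,
				 uint64_t clr, uint64_t set);

static bool handle_set_rc(uint64_t gpa, bool writing)
{
	uint64_t pgflags = PAGE_ACCESSED;
	_Atomic uint64_t *ptep;

	if (writing)
		pgflags |= PAGE_DIRTY;
	ptep = find_pte(gpa);
	if (!ptep)
		return false;		/* no PTE: take the full fault path */
	radix_update_pte(ptep, 0, pgflags);
	return true;
}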
822 struct kvm *kvm = vcpu->kvm; in kvmppc_book3s_instantiate_page() local
834 mmu_seq = kvm->mmu_invalidate_seq; in kvmppc_book3s_instantiate_page()
849 /* Call KVM generic code to do the slow-path check */ in kvmppc_book3s_instantiate_page()
853 return -EFAULT; in kvmppc_book3s_instantiate_page()
866 spin_lock(&kvm->mmu_lock); in kvmppc_book3s_instantiate_page()
867 ptep = find_kvm_host_pte(kvm, mmu_seq, hva, &shift); in kvmppc_book3s_instantiate_page()
871 spin_unlock(&kvm->mmu_lock); in kvmppc_book3s_instantiate_page()
883 large_enable = !(memslot->flags & KVM_MEM_LOG_DIRTY_PAGES); in kvmppc_book3s_instantiate_page()
887 (gpa & (PUD_SIZE - PAGE_SIZE)) == in kvmppc_book3s_instantiate_page()
888 (hva & (PUD_SIZE - PAGE_SIZE))) { in kvmppc_book3s_instantiate_page()
891 (gpa & (PMD_SIZE - PAGE_SIZE)) == in kvmppc_book3s_instantiate_page()
892 (hva & (PMD_SIZE - PAGE_SIZE))) { in kvmppc_book3s_instantiate_page()
902 unsigned long rpnmask = (1ul << shift) - PAGE_SIZE; in kvmppc_book3s_instantiate_page()
916 ret = kvmppc_create_pte(kvm, kvm->arch.pgtable, pte, gpa, level, in kvmppc_book3s_instantiate_page()
917 mmu_seq, kvm->arch.lpid, NULL, NULL); in kvmppc_book3s_instantiate_page()
932 kvm->stat.num_2M_pages++; in kvmppc_book3s_instantiate_page()
934 kvm->stat.num_1G_pages++; in kvmppc_book3s_instantiate_page()
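Lines 883-892 decide whether a 1GB or 2MB guest mapping is possible: the host PTE must be at least that large, and gpa and hva must agree in every bit between the base-page offset and the large-page offset, which is exactly what the '& (SIZE - PAGE_SIZE)' mask compares. As a standalone helper (the sizes are the usual radix values, assumed here rather than taken from the kernel headers):

#include <stdbool.h>
#include <stdint.h>

#define PAGE_SIZE	(1UL << 12)
#define PMD_SIZE	(1UL << 21)	/* 2 MB */
#define PUD_SIZE	(1UL << 30)	/* 1 GB */

static bool can_map_large(uint64_t gpa, uint64_t hva, uint64_t lg_size)
{
	return (gpa & (lg_size - PAGE_SIZE)) == (hva & (lg_size - PAGE_SIZE));
}

/* e.g. can_map_large(gpa, hva, PMD_SIZE) before attempting a 2 MB PTE */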
943 struct kvm *kvm = vcpu->kvm; in kvmppc_book3s_radix_page_fault() local
952 pr_err("KVM: Got unsupported MMU fault\n"); in kvmppc_book3s_radix_page_fault()
953 return -EFAULT; in kvmppc_book3s_radix_page_fault()
957 pr_err("KVM: Got radix HV page fault with DSISR=%lx\n", dsisr); in kvmppc_book3s_radix_page_fault()
965 gpa = vcpu->arch.fault_gpa & ~0xfffUL; in kvmppc_book3s_radix_page_fault()
971 if (kvm->arch.secure_guest & KVMPPC_SECURE_INIT_DONE) in kvmppc_book3s_radix_page_fault()
972 return kvmppc_send_page_to_uv(kvm, gfn); in kvmppc_book3s_radix_page_fault()
975 memslot = gfn_to_memslot(kvm, gfn); in kvmppc_book3s_radix_page_fault()
978 if (!memslot || (memslot->flags & KVM_MEMSLOT_INVALID)) { in kvmppc_book3s_radix_page_fault()
983 * unusual error - reflect it to the guest as DSI. in kvmppc_book3s_radix_page_fault()
993 if (memslot->flags & KVM_MEM_READONLY) { in kvmppc_book3s_radix_page_fault()
1006 spin_lock(&kvm->mmu_lock); in kvmppc_book3s_radix_page_fault()
1007 if (kvmppc_hv_handle_set_rc(kvm, false, writing, in kvmppc_book3s_radix_page_fault()
1008 gpa, kvm->arch.lpid)) in kvmppc_book3s_radix_page_fault()
1010 spin_unlock(&kvm->mmu_lock); in kvmppc_book3s_radix_page_fault()
1021 if (ret == 0 || ret == -EAGAIN) in kvmppc_book3s_radix_page_fault()
1026 /* Called with kvm->mmu_lock held */
1027 void kvm_unmap_radix(struct kvm *kvm, struct kvm_memory_slot *memslot, in kvm_unmap_radix() argument
1034 if (kvm->arch.secure_guest & KVMPPC_SECURE_INIT_DONE) { in kvm_unmap_radix()
1035 uv_page_inval(kvm->arch.lpid, gpa, PAGE_SHIFT); in kvm_unmap_radix()
1039 ptep = find_kvm_secondary_pte(kvm, gpa, &shift); in kvm_unmap_radix()
1041 kvmppc_unmap_pte(kvm, ptep, gpa, shift, memslot, in kvm_unmap_radix()
1042 kvm->arch.lpid); in kvm_unmap_radix()
1045 /* Called with kvm->mmu_lock held */
1046 bool kvm_age_radix(struct kvm *kvm, struct kvm_memory_slot *memslot, in kvm_age_radix() argument
1055 if (kvm->arch.secure_guest & KVMPPC_SECURE_INIT_DONE) in kvm_age_radix()
1058 ptep = find_kvm_secondary_pte(kvm, gpa, &shift); in kvm_age_radix()
1060 old = kvmppc_radix_update_pte(kvm, ptep, _PAGE_ACCESSED, 0, in kvm_age_radix()
1064 rmapp = &memslot->arch.rmap[gfn - memslot->base_gfn]; in kvm_age_radix()
1065 kvmhv_update_nest_rmap_rc_list(kvm, rmapp, _PAGE_ACCESSED, 0, in kvm_age_radix()
1073 /* Called with kvm->mmu_lock held */
1074 bool kvm_test_age_radix(struct kvm *kvm, struct kvm_memory_slot *memslot, in kvm_test_age_radix() argument
1083 if (kvm->arch.secure_guest & KVMPPC_SECURE_INIT_DONE) in kvm_test_age_radix()
1086 ptep = find_kvm_secondary_pte(kvm, gpa, &shift); in kvm_test_age_radix()
1093 static int kvm_radix_test_clear_dirty(struct kvm *kvm, in kvm_radix_test_clear_dirty() argument
1096 unsigned long gfn = memslot->base_gfn + pagenum; in kvm_radix_test_clear_dirty()
1103 if (kvm->arch.secure_guest & KVMPPC_SECURE_INIT_DONE) in kvm_radix_test_clear_dirty()
1107 * For performance reasons we don't hold kvm->mmu_lock while walking the in kvm_radix_test_clear_dirty()
1110 ptep = find_kvm_secondary_pte_unlocked(kvm, gpa, &shift); in kvm_radix_test_clear_dirty()
1116 spin_lock(&kvm->mmu_lock); in kvm_radix_test_clear_dirty()
1128 spin_unlock(&kvm->mmu_lock); in kvm_radix_test_clear_dirty()
1135 old = kvmppc_radix_update_pte(kvm, ptep, _PAGE_DIRTY, 0, in kvm_radix_test_clear_dirty()
1137 kvmppc_radix_tlbie_page(kvm, gpa, shift, kvm->arch.lpid); in kvm_radix_test_clear_dirty()
1139 rmapp = &memslot->arch.rmap[gfn - memslot->base_gfn]; in kvm_radix_test_clear_dirty()
1140 kvmhv_update_nest_rmap_rc_list(kvm, rmapp, _PAGE_DIRTY, 0, in kvm_radix_test_clear_dirty()
1143 spin_unlock(&kvm->mmu_lock); in kvm_radix_test_clear_dirty()
1148 long kvmppc_hv_get_dirty_log_radix(struct kvm *kvm, in kvmppc_hv_get_dirty_log_radix() argument
1154 for (i = 0; i < memslot->npages; i = j) { in kvmppc_hv_get_dirty_log_radix()
1155 npages = kvm_radix_test_clear_dirty(kvm, memslot, i); in kvmppc_hv_get_dirty_log_radix()
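The 'i = j' stepping on line 1154 exists because kvm_radix_test_clear_dirty() can report a whole large-page run of dirty pages at once; the loop then sets that many bitmap bits and skips ahead. A sketch of the loop shape with a hypothetical test_clear_dirty() and a one-bit-per-page bitmap:

#include <stdint.h>

extern int test_clear_dirty(unsigned long pagenum);	/* hypothetical */

static void get_dirty_log(unsigned long npages, uint8_t *bitmap)
{
	unsigned long i, j;

	for (i = 0; i < npages; i = j) {
		int n = test_clear_dirty(i);

		if (n == 0) {		/* clean page, move on */
			j = i + 1;
			continue;
		}
		for (j = i; j < i + n; j++)	/* mark the whole dirty run */
			bitmap[j >> 3] |= 1 << (j & 7);
	}
}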
1173 void kvmppc_radix_flush_memslot(struct kvm *kvm, in kvmppc_radix_flush_memslot() argument
1181 if (kvm->arch.secure_guest & KVMPPC_SECURE_INIT_START) in kvmppc_radix_flush_memslot()
1182 kvmppc_uvmem_drop_pages(memslot, kvm, true); in kvmppc_radix_flush_memslot()
1184 if (kvm->arch.secure_guest & KVMPPC_SECURE_INIT_DONE) in kvmppc_radix_flush_memslot()
1187 gpa = memslot->base_gfn << PAGE_SHIFT; in kvmppc_radix_flush_memslot()
1188 spin_lock(&kvm->mmu_lock); in kvmppc_radix_flush_memslot()
1189 for (n = memslot->npages; n; --n) { in kvmppc_radix_flush_memslot()
1190 ptep = find_kvm_secondary_pte(kvm, gpa, &shift); in kvmppc_radix_flush_memslot()
1192 kvmppc_unmap_pte(kvm, ptep, gpa, shift, memslot, in kvmppc_radix_flush_memslot()
1193 kvm->arch.lpid); in kvmppc_radix_flush_memslot()
1200 kvm->mmu_invalidate_seq++; in kvmppc_radix_flush_memslot()
1201 spin_unlock(&kvm->mmu_lock); in kvmppc_radix_flush_memslot()
1209 info->ap_encodings[*indexp] = mmu_psize_defs[psize].shift | in add_rmmu_ap_encoding()
1214 int kvmhv_get_rmmu_info(struct kvm *kvm, struct kvm_ppc_rmmu_info *info) in kvmhv_get_rmmu_info() argument
1219 return -EINVAL; in kvmhv_get_rmmu_info()
1223 info->geometries[0].page_shift = 12; in kvmhv_get_rmmu_info()
1224 info->geometries[0].level_bits[0] = 9; in kvmhv_get_rmmu_info()
1226 info->geometries[0].level_bits[i] = p9_supported_radix_bits[i]; in kvmhv_get_rmmu_info()
1228 info->geometries[1].page_shift = 16; in kvmhv_get_rmmu_info()
1230 info->geometries[1].level_bits[i] = p9_supported_radix_bits[i]; in kvmhv_get_rmmu_info()
1241 int kvmppc_init_vm_radix(struct kvm *kvm) in kvmppc_init_vm_radix() argument
1243 kvm->arch.pgtable = pgd_alloc(kvm->mm); in kvmppc_init_vm_radix()
1244 if (!kvm->arch.pgtable) in kvmppc_init_vm_radix()
1245 return -ENOMEM; in kvmppc_init_vm_radix()
1260 struct kvm *kvm; member
1272 struct kvm *kvm = inode->i_private; in debugfs_radix_open() local
1277 return -ENOMEM; in debugfs_radix_open()
1279 kvm_get_kvm(kvm); in debugfs_radix_open()
1280 p->kvm = kvm; in debugfs_radix_open()
1281 mutex_init(&p->mutex); in debugfs_radix_open()
1282 file->private_data = p; in debugfs_radix_open()
1289 struct debugfs_radix_state *p = file->private_data; in debugfs_radix_release()
1291 kvm_put_kvm(p->kvm); in debugfs_radix_release()
1299 struct debugfs_radix_state *p = file->private_data; in debugfs_radix_read()
1302 struct kvm *kvm; in debugfs_radix_read() local
1314 kvm = p->kvm; in debugfs_radix_read()
1315 if (!kvm_is_radix(kvm)) in debugfs_radix_read()
1318 ret = mutex_lock_interruptible(&p->mutex); in debugfs_radix_read()
1322 if (p->chars_left) { in debugfs_radix_read()
1323 n = p->chars_left; in debugfs_radix_read()
1326 r = copy_to_user(buf, p->buf + p->buf_index, n); in debugfs_radix_read()
1327 n -= r; in debugfs_radix_read()
1328 p->chars_left -= n; in debugfs_radix_read()
1329 p->buf_index += n; in debugfs_radix_read()
1331 len -= n; in debugfs_radix_read()
1335 ret = -EFAULT; in debugfs_radix_read()
1340 gpa = p->gpa; in debugfs_radix_read()
1343 while (len != 0 && p->lpid >= 0) { in debugfs_radix_read()
1351 p->lpid = kvmhv_nested_next_lpid(kvm, p->lpid); in debugfs_radix_read()
1352 p->hdr = 0; in debugfs_radix_read()
1353 if (p->lpid < 0) in debugfs_radix_read()
1357 if (p->lpid == 0) { in debugfs_radix_read()
1358 pgt = kvm->arch.pgtable; in debugfs_radix_read()
1360 nested = kvmhv_get_nested(kvm, p->lpid, false); in debugfs_radix_read()
1365 pgt = nested->shadow_pgtable; in debugfs_radix_read()
1369 if (!p->hdr) { in debugfs_radix_read()
1370 if (p->lpid > 0) in debugfs_radix_read()
1371 n = scnprintf(p->buf, sizeof(p->buf), in debugfs_radix_read()
1372 "\nNested LPID %d: ", p->lpid); in debugfs_radix_read()
1373 n += scnprintf(p->buf + n, sizeof(p->buf) - n, in debugfs_radix_read()
1375 p->hdr = 1; in debugfs_radix_read()
1419 n = scnprintf(p->buf, sizeof(p->buf), in debugfs_radix_read()
1423 p->chars_left = n; in debugfs_radix_read()
1426 r = copy_to_user(buf, p->buf, n); in debugfs_radix_read()
1427 n -= r; in debugfs_radix_read()
1428 p->chars_left -= n; in debugfs_radix_read()
1429 p->buf_index = n; in debugfs_radix_read()
1431 len -= n; in debugfs_radix_read()
1435 ret = -EFAULT; in debugfs_radix_read()
1439 p->gpa = gpa; in debugfs_radix_read()
1444 mutex_unlock(&p->mutex); in debugfs_radix_read()
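debugfs_radix_read() formats into a fixed buffer and may hand out less than it produced; chars_left and buf_index (lines 1322-1329 and 1423-1431) remember the remainder so the next read drains it first. A userspace model of that carry-over, with memcpy standing in for copy_to_user (so the "bytes not copied" return is always zero here):

#include <stddef.h>
#include <string.h>

struct rd_state {
	char buf[128];
	size_t chars_left;		/* unread bytes from a previous call */
	size_t buf_index;		/* where those bytes start in buf */
};

static size_t drain_leftover(struct rd_state *p, char *out, size_t len)
{
	size_t n = p->chars_left < len ? p->chars_left : len;

	memcpy(out, p->buf + p->buf_index, n);	/* copy_to_user in the kernel */
	p->chars_left -= n;
	p->buf_index += n;
	return n;			/* caller advances its user pointer by n */
}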
1451 return -EACCES; in debugfs_radix_write()
1463 void kvmhv_radix_debugfs_init(struct kvm *kvm) in kvmhv_radix_debugfs_init() argument
1465 debugfs_create_file("radix", 0400, kvm->debugfs_dentry, kvm, in kvmhv_radix_debugfs_init()
1473 kvm_pte_cache = kmem_cache_create("kvm-pte", size, size, 0, pte_ctor); in kvmppc_radix_init()
1475 return -ENOMEM; in kvmppc_radix_init()
1479 kvm_pmd_cache = kmem_cache_create("kvm-pmd", size, size, 0, pmd_ctor); in kvmppc_radix_init()
1482 return -ENOMEM; in kvmppc_radix_init()