// SPDX-License-Identifier: GPL-2.0-only
/*
 *
 * Copyright 2016 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 */

#include <linux/types.h>
#include <linux/string.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/anon_inodes.h>
#include <linux/file.h>
#include <linux/debugfs.h>

#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
#include <asm/page.h>
#include <asm/mmu.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/pte-walk.h>
#include <asm/ultravisor.h>
#include <asm/kvm_book3s_uvmem.h>

/*
 * Supported radix tree geometry.
 * Like p9, we support either 5 or 9 bits at the first (lowest) level,
 * for a page size of 64k or 4k.
 */
static int p9_supported_radix_bits[4] = { 5, 9, 9, 13 };
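
/*
 * For example, with 13/9/9/5 bits per level and a 64k page (16-bit
 * offset), a walk resolves 13 + 9 + 9 + 5 + 16 = 52 bits of effective
 * address; with 13/9/9/9 bits and a 4k page it is 13 + 9 + 9 + 9 + 12
 * = 52 bits, matching the 52-bit space enforced in
 * kvmppc_mmu_walk_radix_tree() below.
 */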

unsigned long __kvmhv_copy_tofrom_guest_radix(int lpid, int pid,
					      gva_t eaddr, void *to, void *from,
					      unsigned long n)
{
	int uninitialized_var(old_pid), old_lpid;
	unsigned long quadrant, ret = n;
	bool is_load = !!to;

	/* Can't access quadrants 1 or 2 in non-HV mode, call the HV to do it */
	if (kvmhv_on_pseries())
		return plpar_hcall_norets(H_COPY_TOFROM_GUEST, lpid, pid, eaddr,
					  __pa(to), __pa(from), n);

	quadrant = 1;
	if (!pid)
		quadrant = 2;
	if (is_load)
		from = (void *) (eaddr | (quadrant << 62));
	else
		to = (void *) (eaddr | (quadrant << 62));

	preempt_disable();

	/* switch the lpid first to avoid running host with unallocated pid */
	old_lpid = mfspr(SPRN_LPID);
	if (old_lpid != lpid)
		mtspr(SPRN_LPID, lpid);
	if (quadrant == 1) {
		old_pid = mfspr(SPRN_PID);
		if (old_pid != pid)
			mtspr(SPRN_PID, pid);
	}
	isync();

	pagefault_disable();
	if (is_load)
		ret = raw_copy_from_user(to, from, n);
	else
		ret = raw_copy_to_user(to, from, n);
	pagefault_enable();

	/* switch the pid first to avoid running host with unallocated pid */
	if (quadrant == 1 && pid != old_pid)
		mtspr(SPRN_PID, old_pid);
	if (lpid != old_lpid)
		mtspr(SPRN_LPID, old_lpid);
	isync();

	preempt_enable();

	return ret;
}
EXPORT_SYMBOL_GPL(__kvmhv_copy_tofrom_guest_radix);
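
/*
 * Illustrative example of the quadrant trick above: a load of n bytes
 * from guest effective address 0x4000 in a non-zero pid context goes
 * through quadrant 1, i.e. host effective address (0x4000 | 1ul << 62);
 * with pid == 0 the same access would use quadrant 2 instead.
 */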

static long kvmhv_copy_tofrom_guest_radix(struct kvm_vcpu *vcpu, gva_t eaddr,
					  void *to, void *from, unsigned long n)
{
	int lpid = vcpu->kvm->arch.lpid;
	int pid = vcpu->arch.pid;

	/* This would cause a data segment interrupt so don't allow the access */
	if (eaddr & (0x3FFUL << 52))
		return -EINVAL;

	/* If this is a nested guest, use its shadow lpid */
	if (vcpu->arch.nested)
		lpid = vcpu->arch.nested->shadow_lpid;

	/* If accessing quadrant 3 then pid is expected to be 0 */
	if (((eaddr >> 62) & 0x3) == 0x3)
		pid = 0;

	eaddr &= ~(0xFFFUL << 52);

	return __kvmhv_copy_tofrom_guest_radix(lpid, pid, eaddr, to, from, n);
}

long kvmhv_copy_from_guest_radix(struct kvm_vcpu *vcpu, gva_t eaddr, void *to,
				 unsigned long n)
{
	long ret;

	ret = kvmhv_copy_tofrom_guest_radix(vcpu, eaddr, to, NULL, n);
	if (ret > 0)
		memset(to + (n - ret), 0, ret);

	return ret;
}
EXPORT_SYMBOL_GPL(kvmhv_copy_from_guest_radix);

long kvmhv_copy_to_guest_radix(struct kvm_vcpu *vcpu, gva_t eaddr, void *from,
			       unsigned long n)
{
	return kvmhv_copy_tofrom_guest_radix(vcpu, eaddr, NULL, from, n);
}
EXPORT_SYMBOL_GPL(kvmhv_copy_to_guest_radix);

int kvmppc_mmu_walk_radix_tree(struct kvm_vcpu *vcpu, gva_t eaddr,
			       struct kvmppc_pte *gpte, u64 root,
			       u64 *pte_ret_p)
{
	struct kvm *kvm = vcpu->kvm;
	int ret, level, ps;
	unsigned long rts, bits, offset, index;
	u64 pte, base, gpa;
	__be64 rpte;

	rts = ((root & RTS1_MASK) >> (RTS1_SHIFT - 3)) |
		((root & RTS2_MASK) >> RTS2_SHIFT);
	bits = root & RPDS_MASK;
	base = root & RPDB_MASK;

	offset = rts + 31;

	/* Current implementations only support 52-bit space */
	if (offset != 52)
		return -EINVAL;

	/* Walk each level of the radix tree */
	for (level = 3; level >= 0; --level) {
		u64 addr;
		/* Check for a valid size */
		if (level && bits != p9_supported_radix_bits[level])
			return -EINVAL;
		if (level == 0 && !(bits == 5 || bits == 9))
			return -EINVAL;
		offset -= bits;
		index = (eaddr >> offset) & ((1UL << bits) - 1);
		/* Check that low bits of page table base are zero */
		if (base & ((1UL << (bits + 3)) - 1))
			return -EINVAL;
		/* Read the entry from guest memory */
		addr = base + (index * sizeof(rpte));
		ret = kvm_read_guest(kvm, addr, &rpte, sizeof(rpte));
		if (ret) {
			if (pte_ret_p)
				*pte_ret_p = addr;
			return ret;
		}
		pte = __be64_to_cpu(rpte);
		if (!(pte & _PAGE_PRESENT))
			return -ENOENT;
		/* Check if a leaf entry */
		if (pte & _PAGE_PTE)
			break;
		/* Get ready to walk the next level */
		base = pte & RPDB_MASK;
		bits = pte & RPDS_MASK;
	}

	/* Need a leaf at lowest level; 512GB pages not supported */
	if (level < 0 || level == 3)
		return -EINVAL;

	/* We found a valid leaf PTE */
	/* Offset is now log base 2 of the page size */
	gpa = pte & 0x01fffffffffff000ul;
	if (gpa & ((1ul << offset) - 1))
		return -EINVAL;
	gpa |= eaddr & ((1ul << offset) - 1);
	for (ps = MMU_PAGE_4K; ps < MMU_PAGE_COUNT; ++ps)
		if (offset == mmu_psize_defs[ps].shift)
			break;
	gpte->page_size = ps;
	gpte->page_shift = offset;

	gpte->eaddr = eaddr;
	gpte->raddr = gpa;

	/* Work out permissions */
	gpte->may_read = !!(pte & _PAGE_READ);
	gpte->may_write = !!(pte & _PAGE_WRITE);
	gpte->may_execute = !!(pte & _PAGE_EXEC);

	gpte->rc = pte & (_PAGE_ACCESSED | _PAGE_DIRTY);

	if (pte_ret_p)
		*pte_ret_p = pte;

	return 0;
}
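
/*
 * Worked example for the walk above: a root for a 52-bit space encodes
 * RTS = 52 - 31 = 21, so "offset" starts at 52; with a 13-bit top level
 * the first index is (eaddr >> 39) & 0x1fff, and each lower level then
 * consumes its own RPDS bits until a leaf PTE is reached.
 */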

/*
 * Used to walk a partition or process table radix tree in guest memory.
 * Note: we exploit the fact that a partition table and a process
 * table have the same layout, that a partition-scoped page table and a
 * process-scoped page table have the same layout, and that the 2nd
 * doubleword of a partition table entry has the same layout as
 * the PTCR register.
 */
int kvmppc_mmu_radix_translate_table(struct kvm_vcpu *vcpu, gva_t eaddr,
				     struct kvmppc_pte *gpte, u64 table,
				     int table_index, u64 *pte_ret_p)
{
	struct kvm *kvm = vcpu->kvm;
	int ret;
	unsigned long size, ptbl, root;
	struct prtb_entry entry;

	if ((table & PRTS_MASK) > 24)
		return -EINVAL;
	size = 1ul << ((table & PRTS_MASK) + 12);

	/* Is the table big enough to contain this entry? */
	if ((table_index * sizeof(entry)) >= size)
		return -EINVAL;

	/* Read the table to find the root of the radix tree */
	ptbl = (table & PRTB_MASK) + (table_index * sizeof(entry));
	ret = kvm_read_guest(kvm, ptbl, &entry, sizeof(entry));
	if (ret)
		return ret;

	/* Root is stored in the first double word */
	root = be64_to_cpu(entry.prtb0);

	return kvmppc_mmu_walk_radix_tree(vcpu, eaddr, gpte, root, pte_ret_p);
}
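
/*
 * Size example for the checks above: with PRTS = 4 the table spans
 * 1ul << (4 + 12) = 64kB, i.e. 4096 entries of 16 bytes
 * (sizeof(struct prtb_entry)), so table_index must be below 4096.
 */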

int kvmppc_mmu_radix_xlate(struct kvm_vcpu *vcpu, gva_t eaddr,
			   struct kvmppc_pte *gpte, bool data, bool iswrite)
{
	u32 pid;
	u64 pte;
	int ret;

	/* Work out effective PID */
	switch (eaddr >> 62) {
	case 0:
		pid = vcpu->arch.pid;
		break;
	case 3:
		pid = 0;
		break;
	default:
		return -EINVAL;
	}

	ret = kvmppc_mmu_radix_translate_table(vcpu, eaddr, gpte,
				vcpu->kvm->arch.process_table, pid, &pte);
	if (ret)
		return ret;

	/* Check privilege (applies only to process scoped translations) */
	if (kvmppc_get_msr(vcpu) & MSR_PR) {
		if (pte & _PAGE_PRIVILEGED) {
			gpte->may_read = 0;
			gpte->may_write = 0;
			gpte->may_execute = 0;
		}
	} else {
		if (!(pte & _PAGE_PRIVILEGED)) {
			/* Check AMR/IAMR to see if strict mode is in force */
			if (vcpu->arch.amr & (1ul << 62))
				gpte->may_read = 0;
			if (vcpu->arch.amr & (1ul << 63))
				gpte->may_write = 0;
			if (vcpu->arch.iamr & (1ul << 62))
				gpte->may_execute = 0;
		}
	}

	return 0;
}

void kvmppc_radix_tlbie_page(struct kvm *kvm, unsigned long addr,
			     unsigned int pshift, unsigned int lpid)
{
	unsigned long psize = PAGE_SIZE;
	int psi;
	long rc;
	unsigned long rb;

	if (pshift)
		psize = 1UL << pshift;
	else
		pshift = PAGE_SHIFT;

	addr &= ~(psize - 1);

	if (!kvmhv_on_pseries()) {
		radix__flush_tlb_lpid_page(lpid, addr, psize);
		return;
	}

	psi = shift_to_mmu_psize(pshift);
	rb = addr | (mmu_get_ap(psi) << PPC_BITLSHIFT(58));
	rc = plpar_hcall_norets(H_TLB_INVALIDATE, H_TLBIE_P1_ENC(0, 0, 1),
				lpid, rb);
	if (rc)
		pr_err("KVM: TLB page invalidation hcall failed, rc=%ld\n", rc);
}

static void kvmppc_radix_flush_pwc(struct kvm *kvm, unsigned int lpid)
{
	long rc;

	if (!kvmhv_on_pseries()) {
		radix__flush_pwc_lpid(lpid);
		return;
	}

	rc = plpar_hcall_norets(H_TLB_INVALIDATE, H_TLBIE_P1_ENC(1, 0, 1),
				lpid, TLBIEL_INVAL_SET_LPID);
	if (rc)
		pr_err("KVM: TLB PWC invalidation hcall failed, rc=%ld\n", rc);
}

static unsigned long kvmppc_radix_update_pte(struct kvm *kvm, pte_t *ptep,
				      unsigned long clr, unsigned long set,
				      unsigned long addr, unsigned int shift)
{
	return __radix_pte_update(ptep, clr, set);
}

void kvmppc_radix_set_pte_at(struct kvm *kvm, unsigned long addr,
			     pte_t *ptep, pte_t pte)
{
	radix__set_pte_at(kvm->mm, addr, ptep, pte, 0);
}

static struct kmem_cache *kvm_pte_cache;
static struct kmem_cache *kvm_pmd_cache;

static pte_t *kvmppc_pte_alloc(void)
{
	return kmem_cache_alloc(kvm_pte_cache, GFP_KERNEL);
}

static void kvmppc_pte_free(pte_t *ptep)
{
	kmem_cache_free(kvm_pte_cache, ptep);
}

static pmd_t *kvmppc_pmd_alloc(void)
{
	return kmem_cache_alloc(kvm_pmd_cache, GFP_KERNEL);
}

static void kvmppc_pmd_free(pmd_t *pmdp)
{
	kmem_cache_free(kvm_pmd_cache, pmdp);
}

/* Called with kvm->mmu_lock held */
void kvmppc_unmap_pte(struct kvm *kvm, pte_t *pte, unsigned long gpa,
		      unsigned int shift,
		      const struct kvm_memory_slot *memslot,
		      unsigned int lpid)
{
	unsigned long old;
	unsigned long gfn = gpa >> PAGE_SHIFT;
	unsigned long page_size = PAGE_SIZE;
	unsigned long hpa;

	old = kvmppc_radix_update_pte(kvm, pte, ~0UL, 0, gpa, shift);
	kvmppc_radix_tlbie_page(kvm, gpa, shift, lpid);

	/* The following only applies to L1 entries */
	if (lpid != kvm->arch.lpid)
		return;

	if (!memslot) {
		memslot = gfn_to_memslot(kvm, gfn);
		if (!memslot)
			return;
	}
	if (shift) { /* 1GB or 2MB page */
		page_size = 1ul << shift;
		if (shift == PMD_SHIFT)
			kvm->stat.num_2M_pages--;
		else if (shift == PUD_SHIFT)
			kvm->stat.num_1G_pages--;
	}

	gpa &= ~(page_size - 1);
	hpa = old & PTE_RPN_MASK;
	kvmhv_remove_nest_rmap_range(kvm, memslot, gpa, hpa, page_size);

	if ((old & _PAGE_DIRTY) && memslot->dirty_bitmap)
		kvmppc_update_dirty_map(memslot, gfn, page_size);
}

/*
 * kvmppc_free_p?d are used to free existing page tables, recursively
 * descending to clear and free child tables.
 * Callers are responsible for flushing the PWC.
 *
 * When page tables are being unmapped/freed as part of the page fault
 * path (full == false), ptes are not expected. There is code to unmap
 * them and emit a warning if encountered, but there may already be
 * data corruption due to the unexpected mappings.
 */
static void kvmppc_unmap_free_pte(struct kvm *kvm, pte_t *pte, bool full,
				  unsigned int lpid)
{
	if (full) {
		memset(pte, 0, sizeof(long) << PTE_INDEX_SIZE);
	} else {
		pte_t *p = pte;
		unsigned long it;

		for (it = 0; it < PTRS_PER_PTE; ++it, ++p) {
			if (pte_val(*p) == 0)
				continue;
			WARN_ON_ONCE(1);
			kvmppc_unmap_pte(kvm, p,
					 pte_pfn(*p) << PAGE_SHIFT,
					 PAGE_SHIFT, NULL, lpid);
		}
	}

	kvmppc_pte_free(pte);
}

static void kvmppc_unmap_free_pmd(struct kvm *kvm, pmd_t *pmd, bool full,
				  unsigned int lpid)
{
	unsigned long im;
	pmd_t *p = pmd;

	for (im = 0; im < PTRS_PER_PMD; ++im, ++p) {
		if (!pmd_present(*p))
			continue;
		if (pmd_is_leaf(*p)) {
			if (full) {
				pmd_clear(p);
			} else {
				WARN_ON_ONCE(1);
				kvmppc_unmap_pte(kvm, (pte_t *)p,
					 pte_pfn(*(pte_t *)p) << PAGE_SHIFT,
					 PMD_SHIFT, NULL, lpid);
			}
		} else {
			pte_t *pte;

			pte = pte_offset_map(p, 0);
			kvmppc_unmap_free_pte(kvm, pte, full, lpid);
			pmd_clear(p);
		}
	}
	kvmppc_pmd_free(pmd);
}

static void kvmppc_unmap_free_pud(struct kvm *kvm, pud_t *pud,
				  unsigned int lpid)
{
	unsigned long iu;
	pud_t *p = pud;

	for (iu = 0; iu < PTRS_PER_PUD; ++iu, ++p) {
		if (!pud_present(*p))
			continue;
		if (pud_is_leaf(*p)) {
			pud_clear(p);
		} else {
			pmd_t *pmd;

			pmd = pmd_offset(p, 0);
			kvmppc_unmap_free_pmd(kvm, pmd, true, lpid);
			pud_clear(p);
		}
	}
	pud_free(kvm->mm, pud);
}

void kvmppc_free_pgtable_radix(struct kvm *kvm, pgd_t *pgd, unsigned int lpid)
{
	unsigned long ig;

	for (ig = 0; ig < PTRS_PER_PGD; ++ig, ++pgd) {
		pud_t *pud;

		if (!pgd_present(*pgd))
			continue;
		pud = pud_offset(pgd, 0);
		kvmppc_unmap_free_pud(kvm, pud, lpid);
		pgd_clear(pgd);
	}
}

void kvmppc_free_radix(struct kvm *kvm)
{
	if (kvm->arch.pgtable) {
		kvmppc_free_pgtable_radix(kvm, kvm->arch.pgtable,
					  kvm->arch.lpid);
		pgd_free(kvm->mm, kvm->arch.pgtable);
		kvm->arch.pgtable = NULL;
	}
}

static void kvmppc_unmap_free_pmd_entry_table(struct kvm *kvm, pmd_t *pmd,
					unsigned long gpa, unsigned int lpid)
{
	pte_t *pte = pte_offset_kernel(pmd, 0);

	/*
	 * Clearing the pmd entry then flushing the PWC ensures that the pte
	 * page will no longer be cached by the MMU, so it can be freed
	 * without flushing the PWC again.
	 */
	pmd_clear(pmd);
	kvmppc_radix_flush_pwc(kvm, lpid);

	kvmppc_unmap_free_pte(kvm, pte, false, lpid);
}

static void kvmppc_unmap_free_pud_entry_table(struct kvm *kvm, pud_t *pud,
					unsigned long gpa, unsigned int lpid)
{
	pmd_t *pmd = pmd_offset(pud, 0);

	/*
	 * Clearing the pud entry then flushing the PWC ensures that the pmd
	 * page and any child pte pages will no longer be cached by the MMU,
	 * so they can be freed without flushing the PWC again.
	 */
	pud_clear(pud);
	kvmppc_radix_flush_pwc(kvm, lpid);

	kvmppc_unmap_free_pmd(kvm, pmd, false, lpid);
}

/*
 * A number of bits may differ between different faults on the same
 * partition-scoped entry: the RC bits, in the course of cleaning and
 * aging, and the write bit, either because the access was upgraded or
 * because a read fault raced with a write fault that set those bits
 * first.
 */
#define PTE_BITS_MUST_MATCH (~(_PAGE_WRITE | _PAGE_DIRTY | _PAGE_ACCESSED))
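
/*
 * Insert a pte into the shadow (partition-scoped) page table at the
 * given level: 0 for a base page at the pte level, 1 for a 2MB leaf at
 * the pmd level, 2 for a 1GB leaf at the pud level. Returns 0 on
 * success, -EAGAIN if the caller should let the guest retry (e.g. after
 * an MMU notifier invalidation or a lost race), or -ENOMEM.
 */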

int kvmppc_create_pte(struct kvm *kvm, pgd_t *pgtable, pte_t pte,
		      unsigned long gpa, unsigned int level,
		      unsigned long mmu_seq, unsigned int lpid,
		      unsigned long *rmapp, struct rmap_nested **n_rmap)
{
	pgd_t *pgd;
	pud_t *pud, *new_pud = NULL;
	pmd_t *pmd, *new_pmd = NULL;
	pte_t *ptep, *new_ptep = NULL;
	int ret;

	/* Traverse the guest's 2nd-level tree, allocate new levels needed */
	pgd = pgtable + pgd_index(gpa);
	pud = NULL;
	if (pgd_present(*pgd))
		pud = pud_offset(pgd, gpa);
	else
		new_pud = pud_alloc_one(kvm->mm, gpa);

	pmd = NULL;
	if (pud && pud_present(*pud) && !pud_is_leaf(*pud))
		pmd = pmd_offset(pud, gpa);
	else if (level <= 1)
		new_pmd = kvmppc_pmd_alloc();

	if (level == 0 && !(pmd && pmd_present(*pmd) && !pmd_is_leaf(*pmd)))
		new_ptep = kvmppc_pte_alloc();

	/* Check if we might have been invalidated; let the guest retry if so */
	spin_lock(&kvm->mmu_lock);
	ret = -EAGAIN;
	if (mmu_notifier_retry(kvm, mmu_seq))
		goto out_unlock;

	/* Now traverse again under the lock and change the tree */
	ret = -ENOMEM;
	if (pgd_none(*pgd)) {
		if (!new_pud)
			goto out_unlock;
		pgd_populate(kvm->mm, pgd, new_pud);
		new_pud = NULL;
	}
	pud = pud_offset(pgd, gpa);
	if (pud_is_leaf(*pud)) {
		unsigned long hgpa = gpa & PUD_MASK;

		/* Check if we raced and someone else has set the same thing */
		if (level == 2) {
			if (pud_raw(*pud) == pte_raw(pte)) {
				ret = 0;
				goto out_unlock;
			}
			/* Valid 1GB page here already, add our extra bits */
			WARN_ON_ONCE((pud_val(*pud) ^ pte_val(pte)) &
							PTE_BITS_MUST_MATCH);
			kvmppc_radix_update_pte(kvm, (pte_t *)pud,
					      0, pte_val(pte), hgpa, PUD_SHIFT);
			ret = 0;
			goto out_unlock;
		}
		/*
		 * If we raced with another CPU which has just put
		 * a 1GB pte in after we saw a pmd page, try again.
		 */
		if (!new_pmd) {
			ret = -EAGAIN;
			goto out_unlock;
		}
		/* Valid 1GB page here already, remove it */
		kvmppc_unmap_pte(kvm, (pte_t *)pud, hgpa, PUD_SHIFT, NULL,
				 lpid);
	}
	if (level == 2) {
		if (!pud_none(*pud)) {
			/*
			 * There's a page table page here, but we wanted to
			 * install a large page, so remove and free the page
			 * table page.
			 */
			kvmppc_unmap_free_pud_entry_table(kvm, pud, gpa, lpid);
		}
		kvmppc_radix_set_pte_at(kvm, gpa, (pte_t *)pud, pte);
		if (rmapp && n_rmap)
			kvmhv_insert_nest_rmap(kvm, rmapp, n_rmap);
		ret = 0;
		goto out_unlock;
	}
	if (pud_none(*pud)) {
		if (!new_pmd)
			goto out_unlock;
		pud_populate(kvm->mm, pud, new_pmd);
		new_pmd = NULL;
	}
	pmd = pmd_offset(pud, gpa);
	if (pmd_is_leaf(*pmd)) {
		unsigned long lgpa = gpa & PMD_MASK;

		/* Check if we raced and someone else has set the same thing */
		if (level == 1) {
			if (pmd_raw(*pmd) == pte_raw(pte)) {
				ret = 0;
				goto out_unlock;
			}
			/* Valid 2MB page here already, add our extra bits */
			WARN_ON_ONCE((pmd_val(*pmd) ^ pte_val(pte)) &
							PTE_BITS_MUST_MATCH);
			kvmppc_radix_update_pte(kvm, pmdp_ptep(pmd),
					0, pte_val(pte), lgpa, PMD_SHIFT);
			ret = 0;
			goto out_unlock;
		}

		/*
		 * If we raced with another CPU which has just put
		 * a 2MB pte in after we saw a pte page, try again.
		 */
		if (!new_ptep) {
			ret = -EAGAIN;
			goto out_unlock;
		}
		/* Valid 2MB page here already, remove it */
		kvmppc_unmap_pte(kvm, pmdp_ptep(pmd), lgpa, PMD_SHIFT, NULL,
				 lpid);
	}
	if (level == 1) {
		if (!pmd_none(*pmd)) {
			/*
			 * There's a page table page here, but we wanted to
			 * install a large page, so remove and free the page
			 * table page.
			 */
			kvmppc_unmap_free_pmd_entry_table(kvm, pmd, gpa, lpid);
		}
		kvmppc_radix_set_pte_at(kvm, gpa, pmdp_ptep(pmd), pte);
		if (rmapp && n_rmap)
			kvmhv_insert_nest_rmap(kvm, rmapp, n_rmap);
		ret = 0;
		goto out_unlock;
	}
	if (pmd_none(*pmd)) {
		if (!new_ptep)
			goto out_unlock;
		pmd_populate(kvm->mm, pmd, new_ptep);
		new_ptep = NULL;
	}
	ptep = pte_offset_kernel(pmd, gpa);
	if (pte_present(*ptep)) {
		/* Check if someone else set the same thing */
		if (pte_raw(*ptep) == pte_raw(pte)) {
			ret = 0;
			goto out_unlock;
		}
		/* Valid page here already, add our extra bits */
		WARN_ON_ONCE((pte_val(*ptep) ^ pte_val(pte)) &
							PTE_BITS_MUST_MATCH);
		kvmppc_radix_update_pte(kvm, ptep, 0, pte_val(pte), gpa, 0);
		ret = 0;
		goto out_unlock;
	}
	kvmppc_radix_set_pte_at(kvm, gpa, ptep, pte);
	if (rmapp && n_rmap)
		kvmhv_insert_nest_rmap(kvm, rmapp, n_rmap);
	ret = 0;

 out_unlock:
	spin_unlock(&kvm->mmu_lock);
	if (new_pud)
		pud_free(kvm->mm, new_pud);
	if (new_pmd)
		kvmppc_pmd_free(new_pmd);
	if (new_ptep)
		kvmppc_pte_free(new_ptep);
	return ret;
}

bool kvmppc_hv_handle_set_rc(struct kvm *kvm, pgd_t *pgtable, bool writing,
			     unsigned long gpa, unsigned int lpid)
{
	unsigned long pgflags;
	unsigned int shift;
	pte_t *ptep;

	/*
	 * Need to set an R or C bit in the 2nd-level tables;
	 * since we are just helping out the hardware here,
	 * it is sufficient to do what the hardware does.
	 */
	pgflags = _PAGE_ACCESSED;
	if (writing)
		pgflags |= _PAGE_DIRTY;
	/*
	 * We are walking the secondary (partition-scoped) page table here.
	 * We can do this without disabling irq because the Linux MM
	 * subsystem doesn't do THP splits and collapses on this tree.
	 */
	ptep = __find_linux_pte(pgtable, gpa, NULL, &shift);
	if (ptep && pte_present(*ptep) && (!writing || pte_write(*ptep))) {
		kvmppc_radix_update_pte(kvm, ptep, 0, pgflags, gpa, shift);
		return true;
	}
	return false;
}

int kvmppc_book3s_instantiate_page(struct kvm_vcpu *vcpu,
				   unsigned long gpa,
				   struct kvm_memory_slot *memslot,
				   bool writing, bool kvm_ro,
				   pte_t *inserted_pte, unsigned int *levelp)
{
	struct kvm *kvm = vcpu->kvm;
	struct page *page = NULL;
	unsigned long mmu_seq;
	unsigned long hva, gfn = gpa >> PAGE_SHIFT;
	bool upgrade_write = false;
	bool *upgrade_p = &upgrade_write;
	pte_t pte, *ptep;
	unsigned int shift, level;
	int ret;
	bool large_enable;

	/* used to check for invalidations in progress */
	mmu_seq = kvm->mmu_notifier_seq;
	smp_rmb();

	/*
	 * Do a fast check first, since __gfn_to_pfn_memslot doesn't
	 * do it with !atomic && !async, which is how we call it.
	 * We always ask for write permission since the common case
	 * is that the page is writable.
	 */
	hva = gfn_to_hva_memslot(memslot, gfn);
	if (!kvm_ro && __get_user_pages_fast(hva, 1, 1, &page) == 1) {
		upgrade_write = true;
	} else {
		unsigned long pfn;

		/* Call KVM generic code to do the slow-path check */
		pfn = __gfn_to_pfn_memslot(memslot, gfn, false, NULL,
					   writing, upgrade_p);
		if (is_error_noslot_pfn(pfn))
			return -EFAULT;
		page = NULL;
		if (pfn_valid(pfn)) {
			page = pfn_to_page(pfn);
			if (PageReserved(page))
				page = NULL;
		}
	}

	/*
	 * Read the PTE from the process' radix tree and use that
	 * so we get the shift and attribute bits.
	 */
	local_irq_disable();
	ptep = __find_linux_pte(vcpu->arch.pgdir, hva, NULL, &shift);
	/*
	 * If the PTE disappeared temporarily due to a THP
	 * collapse, just return and let the guest try again.
	 */
	if (!ptep) {
		local_irq_enable();
		if (page)
			put_page(page);
		return RESUME_GUEST;
	}
	pte = *ptep;
	local_irq_enable();

	/* If we're logging dirty pages, always map single pages */
	large_enable = !(memslot->flags & KVM_MEM_LOG_DIRTY_PAGES);

	/* Get pte level from shift/size */
	if (large_enable && shift == PUD_SHIFT &&
	    (gpa & (PUD_SIZE - PAGE_SIZE)) ==
	    (hva & (PUD_SIZE - PAGE_SIZE))) {
		level = 2;
	} else if (large_enable && shift == PMD_SHIFT &&
		   (gpa & (PMD_SIZE - PAGE_SIZE)) ==
		   (hva & (PMD_SIZE - PAGE_SIZE))) {
		level = 1;
	} else {
		level = 0;
		if (shift > PAGE_SHIFT) {
			/*
			 * If the pte maps more than one page, bring over
			 * bits from the virtual address to get the real
			 * address of the specific single page we want.
			 */
			unsigned long rpnmask = (1ul << shift) - PAGE_SIZE;
			pte = __pte(pte_val(pte) | (hva & rpnmask));
		}
	}

	pte = __pte(pte_val(pte) | _PAGE_EXEC | _PAGE_ACCESSED);
	if (writing || upgrade_write) {
		if (pte_val(pte) & _PAGE_WRITE)
			pte = __pte(pte_val(pte) | _PAGE_DIRTY);
	} else {
		pte = __pte(pte_val(pte) & ~(_PAGE_WRITE | _PAGE_DIRTY));
	}

	/* Allocate space in the tree and write the PTE */
	ret = kvmppc_create_pte(kvm, kvm->arch.pgtable, pte, gpa, level,
				mmu_seq, kvm->arch.lpid, NULL, NULL);
	if (inserted_pte)
		*inserted_pte = pte;
	if (levelp)
		*levelp = level;

	if (page) {
		if (!ret && (pte_val(pte) & _PAGE_WRITE))
			set_page_dirty_lock(page);
		put_page(page);
	}

	/* Increment number of large pages if we (successfully) inserted one */
	if (!ret) {
		if (level == 1)
			kvm->stat.num_2M_pages++;
		else if (level == 2)
			kvm->stat.num_1G_pages++;
	}

	return ret;
}

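/*
 * Handle a hypervisor (2nd-level) page fault for a radix guest: unusual
 * DSISR bits are either fatal or reflected to the guest as a DSI, an
 * address with no valid memslot is treated as emulated MMIO, a fault
 * for the reference/change bits is first retried here under the
 * mmu_lock, and anything else is resolved by instantiating a page.
 */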
int kvmppc_book3s_radix_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu,
				   unsigned long ea, unsigned long dsisr)
{
	struct kvm *kvm = vcpu->kvm;
	unsigned long gpa, gfn;
	struct kvm_memory_slot *memslot;
	long ret;
	bool writing = !!(dsisr & DSISR_ISSTORE);
	bool kvm_ro = false;

	/* Check for unusual errors */
	if (dsisr & DSISR_UNSUPP_MMU) {
		pr_err("KVM: Got unsupported MMU fault\n");
		return -EFAULT;
	}
	if (dsisr & DSISR_BADACCESS) {
		/* Reflect to the guest as DSI */
		pr_err("KVM: Got radix HV page fault with DSISR=%lx\n", dsisr);
		kvmppc_core_queue_data_storage(vcpu, ea, dsisr);
		return RESUME_GUEST;
	}

	/* Translate the logical address */
	gpa = vcpu->arch.fault_gpa & ~0xfffUL;
	gpa &= ~0xF000000000000000ul;
	gfn = gpa >> PAGE_SHIFT;
	if (!(dsisr & DSISR_PRTABLE_FAULT))
		gpa |= ea & 0xfff;

	if (kvm->arch.secure_guest & KVMPPC_SECURE_INIT_DONE)
		return kvmppc_send_page_to_uv(kvm, gfn);

	/* Get the corresponding memslot */
	memslot = gfn_to_memslot(kvm, gfn);

	/* No memslot means it's an emulated MMIO region */
	if (!memslot || (memslot->flags & KVM_MEMSLOT_INVALID)) {
		if (dsisr & (DSISR_PRTABLE_FAULT | DSISR_BADACCESS |
			     DSISR_SET_RC)) {
			/*
			 * Bad address in guest page table tree, or other
			 * unusual error - reflect it to the guest as DSI.
			 */
			kvmppc_core_queue_data_storage(vcpu, ea, dsisr);
			return RESUME_GUEST;
		}
		return kvmppc_hv_emulate_mmio(run, vcpu, gpa, ea, writing);
	}

	if (memslot->flags & KVM_MEM_READONLY) {
		if (writing) {
			/* give the guest a DSI */
			kvmppc_core_queue_data_storage(vcpu, ea, DSISR_ISSTORE |
						       DSISR_PROTFAULT);
			return RESUME_GUEST;
		}
		kvm_ro = true;
	}

	/* Failed to set the reference/change bits */
	if (dsisr & DSISR_SET_RC) {
		spin_lock(&kvm->mmu_lock);
		if (kvmppc_hv_handle_set_rc(kvm, kvm->arch.pgtable,
					    writing, gpa, kvm->arch.lpid))
			dsisr &= ~DSISR_SET_RC;
		spin_unlock(&kvm->mmu_lock);

		if (!(dsisr & (DSISR_BAD_FAULT_64S | DSISR_NOHPTE |
			       DSISR_PROTFAULT | DSISR_SET_RC)))
			return RESUME_GUEST;
	}

	/* Try to insert a pte */
	ret = kvmppc_book3s_instantiate_page(vcpu, gpa, memslot, writing,
					     kvm_ro, NULL, NULL);

	if (ret == 0 || ret == -EAGAIN)
		ret = RESUME_GUEST;
	return ret;
}

/* Called with kvm->mmu_lock held */
int kvm_unmap_radix(struct kvm *kvm, struct kvm_memory_slot *memslot,
		    unsigned long gfn)
{
	pte_t *ptep;
	unsigned long gpa = gfn << PAGE_SHIFT;
	unsigned int shift;

	if (kvm->arch.secure_guest & KVMPPC_SECURE_INIT_DONE) {
		uv_page_inval(kvm->arch.lpid, gpa, PAGE_SHIFT);
		return 0;
	}

	ptep = __find_linux_pte(kvm->arch.pgtable, gpa, NULL, &shift);
	if (ptep && pte_present(*ptep))
		kvmppc_unmap_pte(kvm, ptep, gpa, shift, memslot,
				 kvm->arch.lpid);
	return 0;
}

/* Called with kvm->mmu_lock held */
int kvm_age_radix(struct kvm *kvm, struct kvm_memory_slot *memslot,
		  unsigned long gfn)
{
	pte_t *ptep;
	unsigned long gpa = gfn << PAGE_SHIFT;
	unsigned int shift;
	int ref = 0;
	unsigned long old, *rmapp;

	if (kvm->arch.secure_guest & KVMPPC_SECURE_INIT_DONE)
		return ref;

	ptep = __find_linux_pte(kvm->arch.pgtable, gpa, NULL, &shift);
	if (ptep && pte_present(*ptep) && pte_young(*ptep)) {
		old = kvmppc_radix_update_pte(kvm, ptep, _PAGE_ACCESSED, 0,
					      gpa, shift);
		/* XXX need to flush tlb here? */
		/* Also clear bit in ptes in shadow pgtable for nested guests */
		rmapp = &memslot->arch.rmap[gfn - memslot->base_gfn];
		kvmhv_update_nest_rmap_rc_list(kvm, rmapp, _PAGE_ACCESSED, 0,
					       old & PTE_RPN_MASK,
					       1UL << shift);
		ref = 1;
	}
	return ref;
}

/* Called with kvm->mmu_lock held */
int kvm_test_age_radix(struct kvm *kvm, struct kvm_memory_slot *memslot,
		       unsigned long gfn)
{
	pte_t *ptep;
	unsigned long gpa = gfn << PAGE_SHIFT;
	unsigned int shift;
	int ref = 0;

	if (kvm->arch.secure_guest & KVMPPC_SECURE_INIT_DONE)
		return ref;

	ptep = __find_linux_pte(kvm->arch.pgtable, gpa, NULL, &shift);
	if (ptep && pte_present(*ptep) && pte_young(*ptep))
		ref = 1;
	return ref;
}

/* Returns the number of PAGE_SIZE pages that are dirty */
static int kvm_radix_test_clear_dirty(struct kvm *kvm,
				struct kvm_memory_slot *memslot, int pagenum)
{
	unsigned long gfn = memslot->base_gfn + pagenum;
	unsigned long gpa = gfn << PAGE_SHIFT;
	pte_t *ptep;
	unsigned int shift;
	int ret = 0;
	unsigned long old, *rmapp;

	if (kvm->arch.secure_guest & KVMPPC_SECURE_INIT_DONE)
		return ret;

	ptep = __find_linux_pte(kvm->arch.pgtable, gpa, NULL, &shift);
	if (ptep && pte_present(*ptep) && pte_dirty(*ptep)) {
		ret = 1;
		if (shift)
			ret = 1 << (shift - PAGE_SHIFT);
		spin_lock(&kvm->mmu_lock);
		old = kvmppc_radix_update_pte(kvm, ptep, _PAGE_DIRTY, 0,
					      gpa, shift);
		kvmppc_radix_tlbie_page(kvm, gpa, shift, kvm->arch.lpid);
		/* Also clear bit in ptes in shadow pgtable for nested guests */
		rmapp = &memslot->arch.rmap[gfn - memslot->base_gfn];
		kvmhv_update_nest_rmap_rc_list(kvm, rmapp, _PAGE_DIRTY, 0,
					       old & PTE_RPN_MASK,
					       1UL << shift);
		spin_unlock(&kvm->mmu_lock);
	}
	return ret;
}

long kvmppc_hv_get_dirty_log_radix(struct kvm *kvm,
			struct kvm_memory_slot *memslot, unsigned long *map)
{
	unsigned long i, j;
	int npages;

	for (i = 0; i < memslot->npages; i = j) {
		npages = kvm_radix_test_clear_dirty(kvm, memslot, i);

		/*
		 * Note that if npages > 0 then i must be a multiple of npages,
		 * since huge pages are only used to back the guest at guest
		 * real addresses that are a multiple of their size.
		 * Since we have at most one PTE covering any given guest
		 * real address, if npages > 1 we can skip to i + npages.
		 */
		j = i + 1;
		if (npages) {
			set_dirty_bits(map, i, npages);
			j = i + npages;
		}
	}
	return 0;
}
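
/*
 * Example for the skip logic above: with a 64k base page size, a dirty
 * 2MB backing page makes kvm_radix_test_clear_dirty() return
 * 1 << (21 - 16) = 32, so the loop sets 32 map bits and advances by 32
 * pages at once.
 */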

void kvmppc_radix_flush_memslot(struct kvm *kvm,
				const struct kvm_memory_slot *memslot)
{
	unsigned long n;
	pte_t *ptep;
	unsigned long gpa;
	unsigned int shift;

	if (kvm->arch.secure_guest & KVMPPC_SECURE_INIT_START)
		kvmppc_uvmem_drop_pages(memslot, kvm);

	if (kvm->arch.secure_guest & KVMPPC_SECURE_INIT_DONE)
		return;

	gpa = memslot->base_gfn << PAGE_SHIFT;
	spin_lock(&kvm->mmu_lock);
	for (n = memslot->npages; n; --n) {
		ptep = __find_linux_pte(kvm->arch.pgtable, gpa, NULL, &shift);
		if (ptep && pte_present(*ptep))
			kvmppc_unmap_pte(kvm, ptep, gpa, shift, memslot,
					 kvm->arch.lpid);
		gpa += PAGE_SIZE;
	}
	spin_unlock(&kvm->mmu_lock);
}

static void add_rmmu_ap_encoding(struct kvm_ppc_rmmu_info *info,
				 int psize, int *indexp)
{
	if (!mmu_psize_defs[psize].shift)
		return;
	info->ap_encodings[*indexp] = mmu_psize_defs[psize].shift |
		(mmu_psize_defs[psize].ap << 29);
	++(*indexp);
}
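
/*
 * Each encoding packs the page shift into the low bits with the AP
 * value in bits 31:29; for instance, assuming the POWER9 radix AP value
 * of 1 for 2MB pages, that entry would be 21 | (1 << 29) = 0x20000015.
 */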

int kvmhv_get_rmmu_info(struct kvm *kvm, struct kvm_ppc_rmmu_info *info)
{
	int i;

	if (!radix_enabled())
		return -EINVAL;
	memset(info, 0, sizeof(*info));

	/* 4k page size */
	info->geometries[0].page_shift = 12;
	info->geometries[0].level_bits[0] = 9;
	for (i = 1; i < 4; ++i)
		info->geometries[0].level_bits[i] = p9_supported_radix_bits[i];
	/* 64k page size */
	info->geometries[1].page_shift = 16;
	for (i = 0; i < 4; ++i)
		info->geometries[1].level_bits[i] = p9_supported_radix_bits[i];

	i = 0;
	add_rmmu_ap_encoding(info, MMU_PAGE_4K, &i);
	add_rmmu_ap_encoding(info, MMU_PAGE_64K, &i);
	add_rmmu_ap_encoding(info, MMU_PAGE_2M, &i);
	add_rmmu_ap_encoding(info, MMU_PAGE_1G, &i);

	return 0;
}

int kvmppc_init_vm_radix(struct kvm *kvm)
{
	kvm->arch.pgtable = pgd_alloc(kvm->mm);
	if (!kvm->arch.pgtable)
		return -ENOMEM;
	return 0;
}

static void pte_ctor(void *addr)
{
	memset(addr, 0, RADIX_PTE_TABLE_SIZE);
}

static void pmd_ctor(void *addr)
{
	memset(addr, 0, RADIX_PMD_TABLE_SIZE);
}

struct debugfs_radix_state {
	struct kvm	*kvm;
	struct mutex	mutex;
	unsigned long	gpa;
	int		lpid;
	int		chars_left;
	int		buf_index;
	char		buf[128];
	u8		hdr;
};
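
/*
 * The read handler below emits a header line per guest LPID followed by
 * one line per present leaf PTE in the form " <gpa>: <pte> <shift>",
 * e.g. " 1000000: 8e0000000a1d0196 21" (values illustrative only).
 */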

static int debugfs_radix_open(struct inode *inode, struct file *file)
{
	struct kvm *kvm = inode->i_private;
	struct debugfs_radix_state *p;

	p = kzalloc(sizeof(*p), GFP_KERNEL);
	if (!p)
		return -ENOMEM;

	kvm_get_kvm(kvm);
	p->kvm = kvm;
	mutex_init(&p->mutex);
	file->private_data = p;

	return nonseekable_open(inode, file);
}

static int debugfs_radix_release(struct inode *inode, struct file *file)
{
	struct debugfs_radix_state *p = file->private_data;

	kvm_put_kvm(p->kvm);
	kfree(p);
	return 0;
}

static ssize_t debugfs_radix_read(struct file *file, char __user *buf,
				 size_t len, loff_t *ppos)
{
	struct debugfs_radix_state *p = file->private_data;
	ssize_t ret, r;
	unsigned long n;
	struct kvm *kvm;
	unsigned long gpa;
	pgd_t *pgt;
	struct kvm_nested_guest *nested;
	pgd_t pgd, *pgdp;
	pud_t pud, *pudp;
	pmd_t pmd, *pmdp;
	pte_t *ptep;
	int shift;
	unsigned long pte;

	kvm = p->kvm;
	if (!kvm_is_radix(kvm))
		return 0;

	ret = mutex_lock_interruptible(&p->mutex);
	if (ret)
		return ret;

	if (p->chars_left) {
		n = p->chars_left;
		if (n > len)
			n = len;
		r = copy_to_user(buf, p->buf + p->buf_index, n);
		n -= r;
		p->chars_left -= n;
		p->buf_index += n;
		buf += n;
		len -= n;
		ret = n;
		if (r) {
			if (!n)
				ret = -EFAULT;
			goto out;
		}
	}

	gpa = p->gpa;
	nested = NULL;
	pgt = NULL;
	while (len != 0 && p->lpid >= 0) {
		if (gpa >= RADIX_PGTABLE_RANGE) {
			gpa = 0;
			pgt = NULL;
			if (nested) {
				kvmhv_put_nested(nested);
				nested = NULL;
			}
			p->lpid = kvmhv_nested_next_lpid(kvm, p->lpid);
			p->hdr = 0;
			if (p->lpid < 0)
				break;
		}
		if (!pgt) {
			if (p->lpid == 0) {
				pgt = kvm->arch.pgtable;
			} else {
				nested = kvmhv_get_nested(kvm, p->lpid, false);
				if (!nested) {
					gpa = RADIX_PGTABLE_RANGE;
					continue;
				}
				pgt = nested->shadow_pgtable;
			}
		}
		n = 0;
		if (!p->hdr) {
			if (p->lpid > 0)
				n = scnprintf(p->buf, sizeof(p->buf),
					      "\nNested LPID %d: ", p->lpid);
			n += scnprintf(p->buf + n, sizeof(p->buf) - n,
				      "pgdir: %lx\n", (unsigned long)pgt);
			p->hdr = 1;
			goto copy;
		}

		pgdp = pgt + pgd_index(gpa);
		pgd = READ_ONCE(*pgdp);
		if (!(pgd_val(pgd) & _PAGE_PRESENT)) {
			gpa = (gpa & PGDIR_MASK) + PGDIR_SIZE;
			continue;
		}

		pudp = pud_offset(&pgd, gpa);
		pud = READ_ONCE(*pudp);
		if (!(pud_val(pud) & _PAGE_PRESENT)) {
			gpa = (gpa & PUD_MASK) + PUD_SIZE;
			continue;
		}
		if (pud_val(pud) & _PAGE_PTE) {
			pte = pud_val(pud);
			shift = PUD_SHIFT;
			goto leaf;
		}

		pmdp = pmd_offset(&pud, gpa);
		pmd = READ_ONCE(*pmdp);
		if (!(pmd_val(pmd) & _PAGE_PRESENT)) {
			gpa = (gpa & PMD_MASK) + PMD_SIZE;
			continue;
		}
		if (pmd_val(pmd) & _PAGE_PTE) {
			pte = pmd_val(pmd);
			shift = PMD_SHIFT;
			goto leaf;
		}

		ptep = pte_offset_kernel(&pmd, gpa);
		pte = pte_val(READ_ONCE(*ptep));
		if (!(pte & _PAGE_PRESENT)) {
			gpa += PAGE_SIZE;
			continue;
		}
		shift = PAGE_SHIFT;
	leaf:
		n = scnprintf(p->buf, sizeof(p->buf),
			      " %lx: %lx %d\n", gpa, pte, shift);
		gpa += 1ul << shift;
	copy:
		p->chars_left = n;
		if (n > len)
			n = len;
		r = copy_to_user(buf, p->buf, n);
		n -= r;
		p->chars_left -= n;
		p->buf_index = n;
		buf += n;
		len -= n;
		ret += n;
		if (r) {
			if (!ret)
				ret = -EFAULT;
			break;
		}
	}
	p->gpa = gpa;
	if (nested)
		kvmhv_put_nested(nested);

 out:
	mutex_unlock(&p->mutex);
	return ret;
}

static ssize_t debugfs_radix_write(struct file *file, const char __user *buf,
			   size_t len, loff_t *ppos)
{
	return -EACCES;
}

static const struct file_operations debugfs_radix_fops = {
	.owner	 = THIS_MODULE,
	.open	 = debugfs_radix_open,
	.release = debugfs_radix_release,
	.read	 = debugfs_radix_read,
	.write	 = debugfs_radix_write,
	.llseek	 = generic_file_llseek,
};

void kvmhv_radix_debugfs_init(struct kvm *kvm)
{
	kvm->arch.radix_dentry = debugfs_create_file("radix", 0400,
						     kvm->arch.debugfs_dir, kvm,
						     &debugfs_radix_fops);
}

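/*
 * The caches below hold complete page-table pages; e.g. with a 64k base
 * page size (RADIX_PTE_INDEX_SIZE == 5, assumed here), a pte page is
 * sizeof(void *) << 5 = 256 bytes.
 */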
int kvmppc_radix_init(void)
{
	unsigned long size = sizeof(void *) << RADIX_PTE_INDEX_SIZE;

	kvm_pte_cache = kmem_cache_create("kvm-pte", size, size, 0, pte_ctor);
	if (!kvm_pte_cache)
		return -ENOMEM;

	size = sizeof(void *) << RADIX_PMD_INDEX_SIZE;

	kvm_pmd_cache = kmem_cache_create("kvm-pmd", size, size, 0, pmd_ctor);
	if (!kvm_pmd_cache) {
		kmem_cache_destroy(kvm_pte_cache);
		return -ENOMEM;
	}

	return 0;
}

void kvmppc_radix_exit(void)
{
	kmem_cache_destroy(kvm_pte_cache);
	kmem_cache_destroy(kvm_pmd_cache);
}