// SPDX-License-Identifier: GPL-2.0-only
/*
 *
 * Copyright 2016 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 */

#include <linux/types.h>
#include <linux/string.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/anon_inodes.h>
#include <linux/file.h>
#include <linux/debugfs.h>
#include <linux/kmemleak.h>	/* for kmemleak_ignore() */
#include <linux/pgtable.h>

#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
#include <asm/page.h>
#include <asm/mmu.h>
#include <asm/pgalloc.h>
#include <asm/pte-walk.h>
#include <asm/ultravisor.h>
#include <asm/kvm_book3s_uvmem.h>

/*
 * Supported radix tree geometry.
 * Like p9, we support either 5 or 9 bits at the first (lowest) level,
 * for a page size of 64k or 4k.
 */
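/* Indexed by tree level: index 0 is the lowest level, index 3 the root */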
static int p9_supported_radix_bits[4] = { 5, 9, 9, 13 };

unsigned long __kvmhv_copy_tofrom_guest_radix(int lpid, int pid,
					      gva_t eaddr, void *to, void *from,
					      unsigned long n)
{
	int old_pid = 0, old_lpid;
	unsigned long quadrant, ret = n;
	bool is_load = !!to;

	/* Can't access quadrants 1 or 2 in non-HV mode, call the HV to do it */
	if (kvmhv_on_pseries())
		return plpar_hcall_norets(H_COPY_TOFROM_GUEST, lpid, pid, eaddr,
					  __pa(to), __pa(from), n);

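	/*
	 * In hypervisor state, quadrant 1 of the effective address space is
	 * translated with LPID = LPIDR and PID = PIDR, while quadrant 2 is
	 * translated with LPID = LPIDR and PID = 0.  Quadrant 2 is therefore
	 * used when the target pid is 0.
	 */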
	quadrant = 1;
	if (!pid)
		quadrant = 2;
	if (is_load)
		from = (void *) (eaddr | (quadrant << 62));
	else
		to = (void *) (eaddr | (quadrant << 62));

	preempt_disable();

	/* switch the lpid first to avoid running host with unallocated pid */
	old_lpid = mfspr(SPRN_LPID);
	if (old_lpid != lpid)
		mtspr(SPRN_LPID, lpid);
	if (quadrant == 1) {
		old_pid = mfspr(SPRN_PID);
		if (old_pid != pid)
			mtspr(SPRN_PID, pid);
	}
	isync();

	if (is_load)
		ret = copy_from_user_nofault(to, (const void __user *)from, n);
	else
		ret = copy_to_user_nofault((void __user *)to, from, n);

	/* switch the pid first to avoid running host with unallocated pid */
	if (quadrant == 1 && pid != old_pid)
		mtspr(SPRN_PID, old_pid);
	if (lpid != old_lpid)
		mtspr(SPRN_LPID, old_lpid);
	isync();

	preempt_enable();

	return ret;
}
EXPORT_SYMBOL_GPL(__kvmhv_copy_tofrom_guest_radix);

static long kvmhv_copy_tofrom_guest_radix(struct kvm_vcpu *vcpu, gva_t eaddr,
					  void *to, void *from, unsigned long n)
{
	int lpid = vcpu->kvm->arch.lpid;
	int pid = vcpu->arch.pid;

	/* This would cause a data segment intr so don't allow the access */
	if (eaddr & (0x3FFUL << 52))
		return -EINVAL;

	/* Should we be using the nested lpid */
	if (vcpu->arch.nested)
		lpid = vcpu->arch.nested->shadow_lpid;

	/* If accessing quadrant 3 then pid is expected to be 0 */
	if (((eaddr >> 62) & 0x3) == 0x3)
		pid = 0;

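	/* Clear bits 52 - 63; the quadrant bits are ORed back in by the callee */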
	eaddr &= ~(0xFFFUL << 52);

	return __kvmhv_copy_tofrom_guest_radix(lpid, pid, eaddr, to, from, n);
}

long kvmhv_copy_from_guest_radix(struct kvm_vcpu *vcpu, gva_t eaddr, void *to,
				 unsigned long n)
{
	long ret;

	ret = kvmhv_copy_tofrom_guest_radix(vcpu, eaddr, to, NULL, n);
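	/* Zero any bytes that could not be copied from the guest */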
	if (ret > 0)
		memset(to + (n - ret), 0, ret);

	return ret;
}
EXPORT_SYMBOL_GPL(kvmhv_copy_from_guest_radix);

long kvmhv_copy_to_guest_radix(struct kvm_vcpu *vcpu, gva_t eaddr, void *from,
			       unsigned long n)
{
	return kvmhv_copy_tofrom_guest_radix(vcpu, eaddr, NULL, from, n);
}
EXPORT_SYMBOL_GPL(kvmhv_copy_to_guest_radix);

int kvmppc_mmu_walk_radix_tree(struct kvm_vcpu *vcpu, gva_t eaddr,
			       struct kvmppc_pte *gpte, u64 root,
			       u64 *pte_ret_p)
{
	struct kvm *kvm = vcpu->kvm;
	int ret, level, ps;
	unsigned long rts, bits, offset, index;
	u64 pte, base, gpa;
	__be64 rpte;

	rts = ((root & RTS1_MASK) >> (RTS1_SHIFT - 3)) |
		((root & RTS2_MASK) >> RTS2_SHIFT);
	bits = root & RPDS_MASK;
	base = root & RPDB_MASK;

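	/* The tree maps an address space of 2^(rts + 31) bytes */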
	offset = rts + 31;

	/* Current implementations only support 52-bit space */
	if (offset != 52)
		return -EINVAL;

	/* Walk each level of the radix tree */
	for (level = 3; level >= 0; --level) {
		u64 addr;
		/* Check a valid size */
		if (level && bits != p9_supported_radix_bits[level])
			return -EINVAL;
		if (level == 0 && !(bits == 5 || bits == 9))
			return -EINVAL;
		offset -= bits;
		index = (eaddr >> offset) & ((1UL << bits) - 1);
		/* Check that low bits of page table base are zero */
		if (base & ((1UL << (bits + 3)) - 1))
			return -EINVAL;
		/* Read the entry from guest memory */
		addr = base + (index * sizeof(rpte));
		ret = kvm_read_guest(kvm, addr, &rpte, sizeof(rpte));
		if (ret) {
			if (pte_ret_p)
				*pte_ret_p = addr;
			return ret;
		}
		pte = __be64_to_cpu(rpte);
		if (!(pte & _PAGE_PRESENT))
			return -ENOENT;
		/* Check if a leaf entry */
		if (pte & _PAGE_PTE)
			break;
		/* Get ready to walk the next level */
		base = pte & RPDB_MASK;
		bits = pte & RPDS_MASK;
	}

	/* Need a leaf at lowest level; 512GB pages not supported */
	if (level < 0 || level == 3)
		return -EINVAL;

	/* We found a valid leaf PTE */
	/* Offset is now log base 2 of the page size */
	gpa = pte & 0x01fffffffffff000ul;
	if (gpa & ((1ul << offset) - 1))
		return -EINVAL;
	gpa |= eaddr & ((1ul << offset) - 1);
	for (ps = MMU_PAGE_4K; ps < MMU_PAGE_COUNT; ++ps)
		if (offset == mmu_psize_defs[ps].shift)
			break;
	gpte->page_size = ps;
	gpte->page_shift = offset;

	gpte->eaddr = eaddr;
	gpte->raddr = gpa;

	/* Work out permissions */
	gpte->may_read = !!(pte & _PAGE_READ);
	gpte->may_write = !!(pte & _PAGE_WRITE);
	gpte->may_execute = !!(pte & _PAGE_EXEC);

	gpte->rc = pte & (_PAGE_ACCESSED | _PAGE_DIRTY);

	if (pte_ret_p)
		*pte_ret_p = pte;

	return 0;
}

/*
 * Used to walk a partition or process table radix tree in guest memory
 * Note: We exploit the fact that a partition table and a process
 * table have the same layout, a partition-scoped page table and a
 * process-scoped page table have the same layout, and the 2nd
 * doubleword of a partition table entry has the same layout as
 * the PTCR register.
 */
int kvmppc_mmu_radix_translate_table(struct kvm_vcpu *vcpu, gva_t eaddr,
				     struct kvmppc_pte *gpte, u64 table,
				     int table_index, u64 *pte_ret_p)
{
	struct kvm *kvm = vcpu->kvm;
	int ret;
	unsigned long size, ptbl, root;
	struct prtb_entry entry;

	if ((table & PRTS_MASK) > 24)
		return -EINVAL;
	size = 1ul << ((table & PRTS_MASK) + 12);

	/* Is the table big enough to contain this entry? */
	if ((table_index * sizeof(entry)) >= size)
		return -EINVAL;

	/* Read the table to find the root of the radix tree */
	ptbl = (table & PRTB_MASK) + (table_index * sizeof(entry));
	ret = kvm_read_guest(kvm, ptbl, &entry, sizeof(entry));
	if (ret)
		return ret;

	/* Root is stored in the first double word */
	root = be64_to_cpu(entry.prtb0);

	return kvmppc_mmu_walk_radix_tree(vcpu, eaddr, gpte, root, pte_ret_p);
}

int kvmppc_mmu_radix_xlate(struct kvm_vcpu *vcpu, gva_t eaddr,
			   struct kvmppc_pte *gpte, bool data, bool iswrite)
{
	u32 pid;
	u64 pte;
	int ret;

	/* Work out effective PID */
	switch (eaddr >> 62) {
	case 0:
		pid = vcpu->arch.pid;
		break;
	case 3:
		pid = 0;
		break;
	default:
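		/* Quadrants 1 and 2 are not valid guest (non-HV) addresses */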
		return -EINVAL;
	}

	ret = kvmppc_mmu_radix_translate_table(vcpu, eaddr, gpte,
				vcpu->kvm->arch.process_table, pid, &pte);
	if (ret)
		return ret;

	/* Check privilege (applies only to process scoped translations) */
	if (kvmppc_get_msr(vcpu) & MSR_PR) {
		if (pte & _PAGE_PRIVILEGED) {
			gpte->may_read = 0;
			gpte->may_write = 0;
			gpte->may_execute = 0;
		}
	} else {
		if (!(pte & _PAGE_PRIVILEGED)) {
			/* Check AMR/IAMR to see if strict mode is in force */
			if (vcpu->arch.amr & (1ul << 62))
				gpte->may_read = 0;
			if (vcpu->arch.amr & (1ul << 63))
				gpte->may_write = 0;
			if (vcpu->arch.iamr & (1ul << 62))
				gpte->may_execute = 0;
		}
	}

	return 0;
}

void kvmppc_radix_tlbie_page(struct kvm *kvm, unsigned long addr,
			     unsigned int pshift, unsigned int lpid)
{
	unsigned long psize = PAGE_SIZE;
	int psi;
	long rc;
	unsigned long rb;

	if (pshift)
		psize = 1UL << pshift;
	else
		pshift = PAGE_SHIFT;

	addr &= ~(psize - 1);

	if (!kvmhv_on_pseries()) {
		radix__flush_tlb_lpid_page(lpid, addr, psize);
		return;
	}

	psi = shift_to_mmu_psize(pshift);
	rb = addr | (mmu_get_ap(psi) << PPC_BITLSHIFT(58));
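	/* H_TLBIE_P1_ENC(0, 0, 1): RIC = 0 (TLB), PRS = 0 (partition-scoped), R = 1 (radix) */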
	rc = plpar_hcall_norets(H_TLB_INVALIDATE, H_TLBIE_P1_ENC(0, 0, 1),
				lpid, rb);
	if (rc)
		pr_err("KVM: TLB page invalidation hcall failed, rc=%ld\n", rc);
}

static void kvmppc_radix_flush_pwc(struct kvm *kvm, unsigned int lpid)
{
	long rc;

	if (!kvmhv_on_pseries()) {
		radix__flush_pwc_lpid(lpid);
		return;
	}

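	/* H_TLBIE_P1_ENC(1, 0, 1): RIC = 1 (page-walk cache), PRS = 0, R = 1 (radix) */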
	rc = plpar_hcall_norets(H_TLB_INVALIDATE, H_TLBIE_P1_ENC(1, 0, 1),
				lpid, TLBIEL_INVAL_SET_LPID);
	if (rc)
		pr_err("KVM: TLB PWC invalidation hcall failed, rc=%ld\n", rc);
}

static unsigned long kvmppc_radix_update_pte(struct kvm *kvm, pte_t *ptep,
				      unsigned long clr, unsigned long set,
				      unsigned long addr, unsigned int shift)
{
	return __radix_pte_update(ptep, clr, set);
}

void kvmppc_radix_set_pte_at(struct kvm *kvm, unsigned long addr,
			     pte_t *ptep, pte_t pte)
{
	radix__set_pte_at(kvm->mm, addr, ptep, pte, 0);
}

static struct kmem_cache *kvm_pte_cache;
static struct kmem_cache *kvm_pmd_cache;

static pte_t *kvmppc_pte_alloc(void)
{
	pte_t *pte;

	pte = kmem_cache_alloc(kvm_pte_cache, GFP_KERNEL);
	/* pmd_populate() will only reference _pa(pte). */
	kmemleak_ignore(pte);

	return pte;
}

static void kvmppc_pte_free(pte_t *ptep)
{
	kmem_cache_free(kvm_pte_cache, ptep);
}

static pmd_t *kvmppc_pmd_alloc(void)
{
	pmd_t *pmd;

	pmd = kmem_cache_alloc(kvm_pmd_cache, GFP_KERNEL);
	/* pud_populate() will only reference _pa(pmd). */
	kmemleak_ignore(pmd);

	return pmd;
}

static void kvmppc_pmd_free(pmd_t *pmdp)
{
	kmem_cache_free(kvm_pmd_cache, pmdp);
}

/* Called with kvm->mmu_lock held */
void kvmppc_unmap_pte(struct kvm *kvm, pte_t *pte, unsigned long gpa,
		      unsigned int shift,
		      const struct kvm_memory_slot *memslot,
		      unsigned int lpid)

{
	unsigned long old;
	unsigned long gfn = gpa >> PAGE_SHIFT;
	unsigned long page_size = PAGE_SIZE;
	unsigned long hpa;

	old = kvmppc_radix_update_pte(kvm, pte, ~0UL, 0, gpa, shift);
	kvmppc_radix_tlbie_page(kvm, gpa, shift, lpid);

	/* The following only applies to L1 entries */
	if (lpid != kvm->arch.lpid)
		return;

	if (!memslot) {
		memslot = gfn_to_memslot(kvm, gfn);
		if (!memslot)
			return;
	}
	if (shift) { /* 1GB or 2MB page */
		page_size = 1ul << shift;
		if (shift == PMD_SHIFT)
			kvm->stat.num_2M_pages--;
		else if (shift == PUD_SHIFT)
			kvm->stat.num_1G_pages--;
	}

	gpa &= ~(page_size - 1);
	hpa = old & PTE_RPN_MASK;
	kvmhv_remove_nest_rmap_range(kvm, memslot, gpa, hpa, page_size);

	if ((old & _PAGE_DIRTY) && memslot->dirty_bitmap)
		kvmppc_update_dirty_map(memslot, gfn, page_size);
}

/*
 * The kvmppc_free_p?d functions free existing page tables, recursively
 * descending to clear and free child tables.
 * Callers are responsible for flushing the PWC.
 *
 * When page tables are being unmapped/freed as part of the page fault path
 * (full == false), valid ptes are generally not expected; however, there
 * is one situation where they arise, which is when dirty page logging is
 * turned off for a memslot while the VM is running.  The new memslot
 * becomes visible to page faults before the memslot commit function
 * gets to flush the memslot, which can lead to a 2MB page mapping being
 * installed for a guest physical address where there are already 64kB
 * (or 4kB) mappings (of sub-pages of the same 2MB page).
 */
static void kvmppc_unmap_free_pte(struct kvm *kvm, pte_t *pte, bool full,
				  unsigned int lpid)
{
	if (full) {
		memset(pte, 0, sizeof(long) << RADIX_PTE_INDEX_SIZE);
	} else {
		pte_t *p = pte;
		unsigned long it;

		for (it = 0; it < PTRS_PER_PTE; ++it, ++p) {
			if (pte_val(*p) == 0)
				continue;
			kvmppc_unmap_pte(kvm, p,
					 pte_pfn(*p) << PAGE_SHIFT,
					 PAGE_SHIFT, NULL, lpid);
		}
	}

	kvmppc_pte_free(pte);
}

static void kvmppc_unmap_free_pmd(struct kvm *kvm, pmd_t *pmd, bool full,
				  unsigned int lpid)
{
	unsigned long im;
	pmd_t *p = pmd;

	for (im = 0; im < PTRS_PER_PMD; ++im, ++p) {
		if (!pmd_present(*p))
			continue;
		if (pmd_is_leaf(*p)) {
			if (full) {
				pmd_clear(p);
			} else {
				WARN_ON_ONCE(1);
				kvmppc_unmap_pte(kvm, (pte_t *)p,
					 pte_pfn(*(pte_t *)p) << PAGE_SHIFT,
					 PMD_SHIFT, NULL, lpid);
			}
		} else {
			pte_t *pte;

			pte = pte_offset_map(p, 0);
			kvmppc_unmap_free_pte(kvm, pte, full, lpid);
			pmd_clear(p);
		}
	}
	kvmppc_pmd_free(pmd);
}

static void kvmppc_unmap_free_pud(struct kvm *kvm, pud_t *pud,
				  unsigned int lpid)
{
	unsigned long iu;
	pud_t *p = pud;

	for (iu = 0; iu < PTRS_PER_PUD; ++iu, ++p) {
		if (!pud_present(*p))
			continue;
		if (pud_is_leaf(*p)) {
			pud_clear(p);
		} else {
			pmd_t *pmd;

			pmd = pmd_offset(p, 0);
			kvmppc_unmap_free_pmd(kvm, pmd, true, lpid);
			pud_clear(p);
		}
	}
	pud_free(kvm->mm, pud);
}

void kvmppc_free_pgtable_radix(struct kvm *kvm, pgd_t *pgd, unsigned int lpid)
{
	unsigned long ig;

	for (ig = 0; ig < PTRS_PER_PGD; ++ig, ++pgd) {
		p4d_t *p4d = p4d_offset(pgd, 0);
		pud_t *pud;

		if (!p4d_present(*p4d))
			continue;
		pud = pud_offset(p4d, 0);
		kvmppc_unmap_free_pud(kvm, pud, lpid);
		p4d_clear(p4d);
	}
}

void kvmppc_free_radix(struct kvm *kvm)
{
	if (kvm->arch.pgtable) {
		kvmppc_free_pgtable_radix(kvm, kvm->arch.pgtable,
					  kvm->arch.lpid);
		pgd_free(kvm->mm, kvm->arch.pgtable);
		kvm->arch.pgtable = NULL;
	}
}

static void kvmppc_unmap_free_pmd_entry_table(struct kvm *kvm, pmd_t *pmd,
					unsigned long gpa, unsigned int lpid)
{
	pte_t *pte = pte_offset_kernel(pmd, 0);

	/*
	 * Clearing the pmd entry then flushing the PWC ensures that the pte
	 * page will no longer be cached by the MMU, so it can be freed
	 * without flushing the PWC again.
	 */
	pmd_clear(pmd);
	kvmppc_radix_flush_pwc(kvm, lpid);

	kvmppc_unmap_free_pte(kvm, pte, false, lpid);
}

static void kvmppc_unmap_free_pud_entry_table(struct kvm *kvm, pud_t *pud,
					unsigned long gpa, unsigned int lpid)
{
	pmd_t *pmd = pmd_offset(pud, 0);

	/*
	 * Clearing the pud entry then flushing the PWC ensures that the pmd
	 * page and any child pte pages will no longer be cached by the MMU,
	 * so they can be freed without flushing the PWC again.
	 */
	pud_clear(pud);
	kvmppc_radix_flush_pwc(kvm, lpid);

	kvmppc_unmap_free_pmd(kvm, pmd, false, lpid);
}

/*
 * A number of bits may differ between faults on the same partition-scoped
 * entry: the R and C bits change in the course of cleaning and aging, and
 * the write bit can change because the access may have been upgraded, or
 * because a read fault races with a write fault that sets those bits first.
 */
#define PTE_BITS_MUST_MATCH (~(_PAGE_WRITE | _PAGE_DIRTY | _PAGE_ACCESSED))

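/*
 * Install a mapping in the partition-scoped radix tree at the given level:
 * level 0 is a regular (4k or 64k) PTE, level 1 a 2MB PMD leaf, level 2 a
 * 1GB PUD leaf.  Missing intermediate page-table levels are allocated.
 */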
int kvmppc_create_pte(struct kvm *kvm, pgd_t *pgtable, pte_t pte,
		      unsigned long gpa, unsigned int level,
		      unsigned long mmu_seq, unsigned int lpid,
		      unsigned long *rmapp, struct rmap_nested **n_rmap)
{
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud, *new_pud = NULL;
	pmd_t *pmd, *new_pmd = NULL;
	pte_t *ptep, *new_ptep = NULL;
	int ret;

	/* Traverse the guest's 2nd-level tree, allocate new levels needed */
	pgd = pgtable + pgd_index(gpa);
	p4d = p4d_offset(pgd, gpa);

	pud = NULL;
	if (p4d_present(*p4d))
		pud = pud_offset(p4d, gpa);
	else
		new_pud = pud_alloc_one(kvm->mm, gpa);

	pmd = NULL;
	if (pud && pud_present(*pud) && !pud_is_leaf(*pud))
		pmd = pmd_offset(pud, gpa);
	else if (level <= 1)
		new_pmd = kvmppc_pmd_alloc();

	if (level == 0 && !(pmd && pmd_present(*pmd) && !pmd_is_leaf(*pmd)))
		new_ptep = kvmppc_pte_alloc();

	/* Check if we might have been invalidated; let the guest retry if so */
	spin_lock(&kvm->mmu_lock);
	ret = -EAGAIN;
	if (mmu_notifier_retry(kvm, mmu_seq))
		goto out_unlock;

	/* Now traverse again under the lock and change the tree */
	ret = -ENOMEM;
	if (p4d_none(*p4d)) {
		if (!new_pud)
			goto out_unlock;
		p4d_populate(kvm->mm, p4d, new_pud);
		new_pud = NULL;
	}
	pud = pud_offset(p4d, gpa);
	if (pud_is_leaf(*pud)) {
		unsigned long hgpa = gpa & PUD_MASK;

		/* Check if we raced and someone else has set the same thing */
		if (level == 2) {
			if (pud_raw(*pud) == pte_raw(pte)) {
				ret = 0;
				goto out_unlock;
			}
			/* Valid 1GB page here already, add our extra bits */
			WARN_ON_ONCE((pud_val(*pud) ^ pte_val(pte)) &
							PTE_BITS_MUST_MATCH);
			kvmppc_radix_update_pte(kvm, (pte_t *)pud,
					      0, pte_val(pte), hgpa, PUD_SHIFT);
			ret = 0;
			goto out_unlock;
		}
		/*
		 * If we raced with another CPU which has just put
		 * a 1GB pte in after we saw a pmd page, try again.
		 */
		if (!new_pmd) {
			ret = -EAGAIN;
			goto out_unlock;
		}
		/* Valid 1GB page here already, remove it */
		kvmppc_unmap_pte(kvm, (pte_t *)pud, hgpa, PUD_SHIFT, NULL,
				 lpid);
	}
	if (level == 2) {
		if (!pud_none(*pud)) {
			/*
			 * There's a page table page here, but we wanted to
			 * install a large page, so remove and free the page
			 * table page.
			 */
			kvmppc_unmap_free_pud_entry_table(kvm, pud, gpa, lpid);
		}
		kvmppc_radix_set_pte_at(kvm, gpa, (pte_t *)pud, pte);
		if (rmapp && n_rmap)
			kvmhv_insert_nest_rmap(kvm, rmapp, n_rmap);
		ret = 0;
		goto out_unlock;
	}
	if (pud_none(*pud)) {
		if (!new_pmd)
			goto out_unlock;
		pud_populate(kvm->mm, pud, new_pmd);
		new_pmd = NULL;
	}
	pmd = pmd_offset(pud, gpa);
	if (pmd_is_leaf(*pmd)) {
		unsigned long lgpa = gpa & PMD_MASK;

		/* Check if we raced and someone else has set the same thing */
		if (level == 1) {
			if (pmd_raw(*pmd) == pte_raw(pte)) {
				ret = 0;
				goto out_unlock;
			}
			/* Valid 2MB page here already, add our extra bits */
			WARN_ON_ONCE((pmd_val(*pmd) ^ pte_val(pte)) &
							PTE_BITS_MUST_MATCH);
			kvmppc_radix_update_pte(kvm, pmdp_ptep(pmd),
					0, pte_val(pte), lgpa, PMD_SHIFT);
			ret = 0;
			goto out_unlock;
		}

		/*
		 * If we raced with another CPU which has just put
		 * a 2MB pte in after we saw a pte page, try again.
		 */
		if (!new_ptep) {
			ret = -EAGAIN;
			goto out_unlock;
		}
		/* Valid 2MB page here already, remove it */
		kvmppc_unmap_pte(kvm, pmdp_ptep(pmd), lgpa, PMD_SHIFT, NULL,
				 lpid);
	}
	if (level == 1) {
		if (!pmd_none(*pmd)) {
			/*
			 * There's a page table page here, but we wanted to
			 * install a large page, so remove and free the page
			 * table page.
			 */
			kvmppc_unmap_free_pmd_entry_table(kvm, pmd, gpa, lpid);
		}
		kvmppc_radix_set_pte_at(kvm, gpa, pmdp_ptep(pmd), pte);
		if (rmapp && n_rmap)
			kvmhv_insert_nest_rmap(kvm, rmapp, n_rmap);
		ret = 0;
		goto out_unlock;
	}
	if (pmd_none(*pmd)) {
		if (!new_ptep)
			goto out_unlock;
		pmd_populate(kvm->mm, pmd, new_ptep);
		new_ptep = NULL;
	}
	ptep = pte_offset_kernel(pmd, gpa);
	if (pte_present(*ptep)) {
		/* Check if someone else set the same thing */
		if (pte_raw(*ptep) == pte_raw(pte)) {
			ret = 0;
			goto out_unlock;
		}
		/* Valid page here already, add our extra bits */
		WARN_ON_ONCE((pte_val(*ptep) ^ pte_val(pte)) &
							PTE_BITS_MUST_MATCH);
		kvmppc_radix_update_pte(kvm, ptep, 0, pte_val(pte), gpa, 0);
		ret = 0;
		goto out_unlock;
	}
	kvmppc_radix_set_pte_at(kvm, gpa, ptep, pte);
	if (rmapp && n_rmap)
		kvmhv_insert_nest_rmap(kvm, rmapp, n_rmap);
	ret = 0;

 out_unlock:
	spin_unlock(&kvm->mmu_lock);
	if (new_pud)
		pud_free(kvm->mm, new_pud);
	if (new_pmd)
		kvmppc_pmd_free(new_pmd);
	if (new_ptep)
		kvmppc_pte_free(new_ptep);
	return ret;
}

bool kvmppc_hv_handle_set_rc(struct kvm *kvm, bool nested, bool writing,
			     unsigned long gpa, unsigned int lpid)
{
	unsigned long pgflags;
	unsigned int shift;
	pte_t *ptep;

	/*
	 * Need to set an R or C bit in the 2nd-level tables;
	 * since we are just helping out the hardware here,
	 * it is sufficient to do what the hardware does.
	 */
	pgflags = _PAGE_ACCESSED;
	if (writing)
		pgflags |= _PAGE_DIRTY;

	if (nested)
		ptep = find_kvm_nested_guest_pte(kvm, lpid, gpa, &shift);
	else
		ptep = find_kvm_secondary_pte(kvm, gpa, &shift);

	if (ptep && pte_present(*ptep) && (!writing || pte_write(*ptep))) {
		kvmppc_radix_update_pte(kvm, ptep, 0, pgflags, gpa, shift);
		return true;
	}
	return false;
}

int kvmppc_book3s_instantiate_page(struct kvm_vcpu *vcpu,
				   unsigned long gpa,
				   struct kvm_memory_slot *memslot,
				   bool writing, bool kvm_ro,
				   pte_t *inserted_pte, unsigned int *levelp)
{
	struct kvm *kvm = vcpu->kvm;
	struct page *page = NULL;
	unsigned long mmu_seq;
	unsigned long hva, gfn = gpa >> PAGE_SHIFT;
	bool upgrade_write = false;
	bool *upgrade_p = &upgrade_write;
	pte_t pte, *ptep;
	unsigned int shift, level;
	int ret;
	bool large_enable;

	/* used to check for invalidations in progress */
	mmu_seq = kvm->mmu_notifier_seq;
	smp_rmb();

	/*
	 * Do a fast check first, since __gfn_to_pfn_memslot doesn't
	 * do it with !atomic && !async, which is how we call it.
	 * We always ask for write permission since the common case
	 * is that the page is writable.
	 */
	hva = gfn_to_hva_memslot(memslot, gfn);
	if (!kvm_ro && get_user_page_fast_only(hva, FOLL_WRITE, &page)) {
		upgrade_write = true;
	} else {
		unsigned long pfn;

		/* Call KVM generic code to do the slow-path check */
		pfn = __gfn_to_pfn_memslot(memslot, gfn, false, NULL,
					   writing, upgrade_p);
		if (is_error_noslot_pfn(pfn))
			return -EFAULT;
		page = NULL;
		if (pfn_valid(pfn)) {
			page = pfn_to_page(pfn);
			if (PageReserved(page))
				page = NULL;
		}
	}

	/*
	 * Read the PTE from the process' radix tree and use that
	 * so we get the shift and attribute bits.
	 */
	spin_lock(&kvm->mmu_lock);
	ptep = find_kvm_host_pte(kvm, mmu_seq, hva, &shift);
	pte = __pte(0);
	if (ptep)
		pte = READ_ONCE(*ptep);
	spin_unlock(&kvm->mmu_lock);
	/*
	 * If the PTE disappeared temporarily due to a THP
	 * collapse, just return and let the guest try again.
	 */
	if (!pte_present(pte)) {
		if (page)
			put_page(page);
		return RESUME_GUEST;
	}

	/* If we're logging dirty pages, always map single pages */
	large_enable = !(memslot->flags & KVM_MEM_LOG_DIRTY_PAGES);

	/* Get pte level from shift/size */
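	/*
	 * A 1GB or 2MB mapping can only be used when the guest real address
	 * and host virtual address are congruent modulo the large page size.
	 */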
	if (large_enable && shift == PUD_SHIFT &&
	    (gpa & (PUD_SIZE - PAGE_SIZE)) ==
	    (hva & (PUD_SIZE - PAGE_SIZE))) {
		level = 2;
	} else if (large_enable && shift == PMD_SHIFT &&
		   (gpa & (PMD_SIZE - PAGE_SIZE)) ==
		   (hva & (PMD_SIZE - PAGE_SIZE))) {
		level = 1;
	} else {
		level = 0;
		if (shift > PAGE_SHIFT) {
			/*
			 * If the pte maps more than one page, bring over
			 * bits from the virtual address to get the real
			 * address of the specific single page we want.
			 */
			unsigned long rpnmask = (1ul << shift) - PAGE_SIZE;
			pte = __pte(pte_val(pte) | (hva & rpnmask));
		}
	}

	pte = __pte(pte_val(pte) | _PAGE_EXEC | _PAGE_ACCESSED);
	if (writing || upgrade_write) {
		if (pte_val(pte) & _PAGE_WRITE)
			pte = __pte(pte_val(pte) | _PAGE_DIRTY);
	} else {
		pte = __pte(pte_val(pte) & ~(_PAGE_WRITE | _PAGE_DIRTY));
	}

	/* Allocate space in the tree and write the PTE */
	ret = kvmppc_create_pte(kvm, kvm->arch.pgtable, pte, gpa, level,
				mmu_seq, kvm->arch.lpid, NULL, NULL);
	if (inserted_pte)
		*inserted_pte = pte;
	if (levelp)
		*levelp = level;

	if (page) {
		if (!ret && (pte_val(pte) & _PAGE_WRITE))
			set_page_dirty_lock(page);
		put_page(page);
	}

	/* Increment number of large pages if we (successfully) inserted one */
	if (!ret) {
		if (level == 1)
			kvm->stat.num_2M_pages++;
		else if (level == 2)
			kvm->stat.num_1G_pages++;
	}

	return ret;
}

int kvmppc_book3s_radix_page_fault(struct kvm_vcpu *vcpu,
				   unsigned long ea, unsigned long dsisr)
{
	struct kvm *kvm = vcpu->kvm;
	unsigned long gpa, gfn;
	struct kvm_memory_slot *memslot;
	long ret;
	bool writing = !!(dsisr & DSISR_ISSTORE);
	bool kvm_ro = false;

	/* Check for unusual errors */
	if (dsisr & DSISR_UNSUPP_MMU) {
		pr_err("KVM: Got unsupported MMU fault\n");
		return -EFAULT;
	}
	if (dsisr & DSISR_BADACCESS) {
		/* Reflect to the guest as DSI */
		pr_err("KVM: Got radix HV page fault with DSISR=%lx\n", dsisr);
		kvmppc_core_queue_data_storage(vcpu, ea, dsisr);
		return RESUME_GUEST;
	}

	/* Translate the logical address */
	gpa = vcpu->arch.fault_gpa & ~0xfffUL;
	gpa &= ~0xF000000000000000ul;
	gfn = gpa >> PAGE_SHIFT;
	if (!(dsisr & DSISR_PRTABLE_FAULT))
		gpa |= ea & 0xfff;

	if (kvm->arch.secure_guest & KVMPPC_SECURE_INIT_DONE)
		return kvmppc_send_page_to_uv(kvm, gfn);

	/* Get the corresponding memslot */
	memslot = gfn_to_memslot(kvm, gfn);

	/* No memslot means it's an emulated MMIO region */
	if (!memslot || (memslot->flags & KVM_MEMSLOT_INVALID)) {
		if (dsisr & (DSISR_PRTABLE_FAULT | DSISR_BADACCESS |
			     DSISR_SET_RC)) {
			/*
			 * Bad address in guest page table tree, or other
			 * unusual error - reflect it to the guest as DSI.
			 */
			kvmppc_core_queue_data_storage(vcpu, ea, dsisr);
			return RESUME_GUEST;
		}
		return kvmppc_hv_emulate_mmio(vcpu, gpa, ea, writing);
	}

	if (memslot->flags & KVM_MEM_READONLY) {
		if (writing) {
			/* give the guest a DSI */
			kvmppc_core_queue_data_storage(vcpu, ea, DSISR_ISSTORE |
						       DSISR_PROTFAULT);
			return RESUME_GUEST;
		}
		kvm_ro = true;
	}

	/* Failed to set the reference/change bits */
	if (dsisr & DSISR_SET_RC) {
		spin_lock(&kvm->mmu_lock);
		if (kvmppc_hv_handle_set_rc(kvm, false, writing,
					    gpa, kvm->arch.lpid))
			dsisr &= ~DSISR_SET_RC;
		spin_unlock(&kvm->mmu_lock);

		if (!(dsisr & (DSISR_BAD_FAULT_64S | DSISR_NOHPTE |
			       DSISR_PROTFAULT | DSISR_SET_RC)))
			return RESUME_GUEST;
	}

	/* Try to insert a pte */
	ret = kvmppc_book3s_instantiate_page(vcpu, gpa, memslot, writing,
					     kvm_ro, NULL, NULL);

	if (ret == 0 || ret == -EAGAIN)
		ret = RESUME_GUEST;
	return ret;
}

/* Called with kvm->mmu_lock held */
int kvm_unmap_radix(struct kvm *kvm, struct kvm_memory_slot *memslot,
		    unsigned long gfn)
{
	pte_t *ptep;
	unsigned long gpa = gfn << PAGE_SHIFT;
	unsigned int shift;

	if (kvm->arch.secure_guest & KVMPPC_SECURE_INIT_DONE) {
		uv_page_inval(kvm->arch.lpid, gpa, PAGE_SHIFT);
		return 0;
	}

	ptep = find_kvm_secondary_pte(kvm, gpa, &shift);
	if (ptep && pte_present(*ptep))
		kvmppc_unmap_pte(kvm, ptep, gpa, shift, memslot,
				 kvm->arch.lpid);
	return 0;
}

/* Called with kvm->mmu_lock held */
int kvm_age_radix(struct kvm *kvm, struct kvm_memory_slot *memslot,
		  unsigned long gfn)
{
	pte_t *ptep;
	unsigned long gpa = gfn << PAGE_SHIFT;
	unsigned int shift;
	int ref = 0;
	unsigned long old, *rmapp;

	if (kvm->arch.secure_guest & KVMPPC_SECURE_INIT_DONE)
		return ref;

	ptep = find_kvm_secondary_pte(kvm, gpa, &shift);
	if (ptep && pte_present(*ptep) && pte_young(*ptep)) {
		old = kvmppc_radix_update_pte(kvm, ptep, _PAGE_ACCESSED, 0,
					      gpa, shift);
		/* XXX need to flush tlb here? */
		/* Also clear bit in ptes in shadow pgtable for nested guests */
		rmapp = &memslot->arch.rmap[gfn - memslot->base_gfn];
		kvmhv_update_nest_rmap_rc_list(kvm, rmapp, _PAGE_ACCESSED, 0,
					       old & PTE_RPN_MASK,
					       1UL << shift);
		ref = 1;
	}
	return ref;
}

/* Called with kvm->mmu_lock held */
int kvm_test_age_radix(struct kvm *kvm, struct kvm_memory_slot *memslot,
		       unsigned long gfn)
{
	pte_t *ptep;
	unsigned long gpa = gfn << PAGE_SHIFT;
	unsigned int shift;
	int ref = 0;

	if (kvm->arch.secure_guest & KVMPPC_SECURE_INIT_DONE)
		return ref;

	ptep = find_kvm_secondary_pte(kvm, gpa, &shift);
	if (ptep && pte_present(*ptep) && pte_young(*ptep))
		ref = 1;
	return ref;
}

/* Returns the number of PAGE_SIZE pages that are dirty */
static int kvm_radix_test_clear_dirty(struct kvm *kvm,
				struct kvm_memory_slot *memslot, int pagenum)
{
	unsigned long gfn = memslot->base_gfn + pagenum;
	unsigned long gpa = gfn << PAGE_SHIFT;
	pte_t *ptep, pte;
	unsigned int shift;
	int ret = 0;
	unsigned long old, *rmapp;

	if (kvm->arch.secure_guest & KVMPPC_SECURE_INIT_DONE)
		return ret;

	/*
	 * For performance reasons we don't hold kvm->mmu_lock while walking the
	 * partition scoped table.
	 */
	ptep = find_kvm_secondary_pte_unlocked(kvm, gpa, &shift);
	if (!ptep)
		return 0;

	pte = READ_ONCE(*ptep);
	if (pte_present(pte) && pte_dirty(pte)) {
		spin_lock(&kvm->mmu_lock);
		/*
		 * Recheck the pte now that we hold the mmu_lock
		 */
		if (pte_val(pte) != pte_val(*ptep)) {
			/*
			 * We have KVM_MEM_LOG_DIRTY_PAGES enabled. Hence we can
			 * only find PAGE_SIZE pte entries here. We can continue
			 * to use the pte addr returned by above page table
			 * walk.
			 */
			if (!pte_present(*ptep) || !pte_dirty(*ptep)) {
				spin_unlock(&kvm->mmu_lock);
				return 0;
			}
		}

		ret = 1;
		VM_BUG_ON(shift);
		old = kvmppc_radix_update_pte(kvm, ptep, _PAGE_DIRTY, 0,
					      gpa, shift);
		kvmppc_radix_tlbie_page(kvm, gpa, shift, kvm->arch.lpid);
		/* Also clear bit in ptes in shadow pgtable for nested guests */
		rmapp = &memslot->arch.rmap[gfn - memslot->base_gfn];
		kvmhv_update_nest_rmap_rc_list(kvm, rmapp, _PAGE_DIRTY, 0,
					       old & PTE_RPN_MASK,
					       1UL << shift);
		spin_unlock(&kvm->mmu_lock);
	}
	return ret;
}

long kvmppc_hv_get_dirty_log_radix(struct kvm *kvm,
			struct kvm_memory_slot *memslot, unsigned long *map)
{
	unsigned long i, j;
	int npages;

	for (i = 0; i < memslot->npages; i = j) {
		npages = kvm_radix_test_clear_dirty(kvm, memslot, i);

		/*
		 * Note that if npages > 0 then i must be a multiple of npages,
		 * since huge pages are only used to back the guest at guest
		 * real addresses that are a multiple of their size.
		 * Since we have at most one PTE covering any given guest
		 * real address, if npages > 1 we can skip to i + npages.
		 */
		j = i + 1;
		if (npages) {
			set_dirty_bits(map, i, npages);
			j = i + npages;
		}
	}
	return 0;
}

void kvmppc_radix_flush_memslot(struct kvm *kvm,
				const struct kvm_memory_slot *memslot)
{
	unsigned long n;
	pte_t *ptep;
	unsigned long gpa;
	unsigned int shift;

	if (kvm->arch.secure_guest & KVMPPC_SECURE_INIT_START)
		kvmppc_uvmem_drop_pages(memslot, kvm, true);

	if (kvm->arch.secure_guest & KVMPPC_SECURE_INIT_DONE)
		return;

	gpa = memslot->base_gfn << PAGE_SHIFT;
	spin_lock(&kvm->mmu_lock);
	for (n = memslot->npages; n; --n) {
		ptep = find_kvm_secondary_pte(kvm, gpa, &shift);
		if (ptep && pte_present(*ptep))
			kvmppc_unmap_pte(kvm, ptep, gpa, shift, memslot,
					 kvm->arch.lpid);
		gpa += PAGE_SIZE;
	}
	/*
	 * Increase the mmu notifier sequence number to prevent any page
	 * fault that read the memslot earlier from writing a PTE.
	 */
	kvm->mmu_notifier_seq++;
	spin_unlock(&kvm->mmu_lock);
}

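/*
 * Each ap_encodings word has the AP value in its top 3 bits and the
 * log base 2 of the page size in its low bits.
 */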
static void add_rmmu_ap_encoding(struct kvm_ppc_rmmu_info *info,
				 int psize, int *indexp)
{
	if (!mmu_psize_defs[psize].shift)
		return;
	info->ap_encodings[*indexp] = mmu_psize_defs[psize].shift |
		(mmu_psize_defs[psize].ap << 29);
	++(*indexp);
}

int kvmhv_get_rmmu_info(struct kvm *kvm, struct kvm_ppc_rmmu_info *info)
{
	int i;

	if (!radix_enabled())
		return -EINVAL;
	memset(info, 0, sizeof(*info));

	/* 4k page size */
	info->geometries[0].page_shift = 12;
	info->geometries[0].level_bits[0] = 9;
	for (i = 1; i < 4; ++i)
		info->geometries[0].level_bits[i] = p9_supported_radix_bits[i];
	/* 64k page size */
	info->geometries[1].page_shift = 16;
	for (i = 0; i < 4; ++i)
		info->geometries[1].level_bits[i] = p9_supported_radix_bits[i];

	i = 0;
	add_rmmu_ap_encoding(info, MMU_PAGE_4K, &i);
	add_rmmu_ap_encoding(info, MMU_PAGE_64K, &i);
	add_rmmu_ap_encoding(info, MMU_PAGE_2M, &i);
	add_rmmu_ap_encoding(info, MMU_PAGE_1G, &i);

	return 0;
}

int kvmppc_init_vm_radix(struct kvm *kvm)
{
	kvm->arch.pgtable = pgd_alloc(kvm->mm);
	if (!kvm->arch.pgtable)
		return -ENOMEM;
	return 0;
}

static void pte_ctor(void *addr)
{
	memset(addr, 0, RADIX_PTE_TABLE_SIZE);
}

static void pmd_ctor(void *addr)
{
	memset(addr, 0, RADIX_PMD_TABLE_SIZE);
}

struct debugfs_radix_state {
	struct kvm	*kvm;
	struct mutex	mutex;
	unsigned long	gpa;
	int		lpid;
	int		chars_left;
	int		buf_index;
	char		buf[128];
	u8		hdr;
};

static int debugfs_radix_open(struct inode *inode, struct file *file)
{
	struct kvm *kvm = inode->i_private;
	struct debugfs_radix_state *p;

	p = kzalloc(sizeof(*p), GFP_KERNEL);
	if (!p)
		return -ENOMEM;

	kvm_get_kvm(kvm);
	p->kvm = kvm;
	mutex_init(&p->mutex);
	file->private_data = p;

	return nonseekable_open(inode, file);
}

static int debugfs_radix_release(struct inode *inode, struct file *file)
{
	struct debugfs_radix_state *p = file->private_data;

	kvm_put_kvm(p->kvm);
	kfree(p);
	return 0;
}

static ssize_t debugfs_radix_read(struct file *file, char __user *buf,
				 size_t len, loff_t *ppos)
{
	struct debugfs_radix_state *p = file->private_data;
	ssize_t ret, r;
	unsigned long n;
	struct kvm *kvm;
	unsigned long gpa;
	pgd_t *pgt;
	struct kvm_nested_guest *nested;
	pgd_t *pgdp;
	p4d_t p4d, *p4dp;
	pud_t pud, *pudp;
	pmd_t pmd, *pmdp;
	pte_t *ptep;
	int shift;
	unsigned long pte;

	kvm = p->kvm;
	if (!kvm_is_radix(kvm))
		return 0;

	ret = mutex_lock_interruptible(&p->mutex);
	if (ret)
		return ret;

	if (p->chars_left) {
		n = p->chars_left;
		if (n > len)
			n = len;
		r = copy_to_user(buf, p->buf + p->buf_index, n);
		n -= r;
		p->chars_left -= n;
		p->buf_index += n;
		buf += n;
		len -= n;
		ret = n;
		if (r) {
			if (!n)
				ret = -EFAULT;
			goto out;
		}
	}

	gpa = p->gpa;
	nested = NULL;
	pgt = NULL;
	while (len != 0 && p->lpid >= 0) {
		if (gpa >= RADIX_PGTABLE_RANGE) {
			gpa = 0;
			pgt = NULL;
			if (nested) {
				kvmhv_put_nested(nested);
				nested = NULL;
			}
			p->lpid = kvmhv_nested_next_lpid(kvm, p->lpid);
			p->hdr = 0;
			if (p->lpid < 0)
				break;
		}
		if (!pgt) {
			if (p->lpid == 0) {
				pgt = kvm->arch.pgtable;
			} else {
				nested = kvmhv_get_nested(kvm, p->lpid, false);
				if (!nested) {
					gpa = RADIX_PGTABLE_RANGE;
					continue;
				}
				pgt = nested->shadow_pgtable;
			}
		}
		n = 0;
		if (!p->hdr) {
			if (p->lpid > 0)
				n = scnprintf(p->buf, sizeof(p->buf),
					      "\nNested LPID %d: ", p->lpid);
			n += scnprintf(p->buf + n, sizeof(p->buf) - n,
				      "pgdir: %lx\n", (unsigned long)pgt);
			p->hdr = 1;
			goto copy;
		}

		pgdp = pgt + pgd_index(gpa);
		p4dp = p4d_offset(pgdp, gpa);
		p4d = READ_ONCE(*p4dp);
		if (!(p4d_val(p4d) & _PAGE_PRESENT)) {
			gpa = (gpa & P4D_MASK) + P4D_SIZE;
			continue;
		}

		pudp = pud_offset(&p4d, gpa);
		pud = READ_ONCE(*pudp);
		if (!(pud_val(pud) & _PAGE_PRESENT)) {
			gpa = (gpa & PUD_MASK) + PUD_SIZE;
			continue;
		}
		if (pud_val(pud) & _PAGE_PTE) {
			pte = pud_val(pud);
			shift = PUD_SHIFT;
			goto leaf;
		}

		pmdp = pmd_offset(&pud, gpa);
		pmd = READ_ONCE(*pmdp);
		if (!(pmd_val(pmd) & _PAGE_PRESENT)) {
			gpa = (gpa & PMD_MASK) + PMD_SIZE;
			continue;
		}
		if (pmd_val(pmd) & _PAGE_PTE) {
			pte = pmd_val(pmd);
			shift = PMD_SHIFT;
			goto leaf;
		}

		ptep = pte_offset_kernel(&pmd, gpa);
		pte = pte_val(READ_ONCE(*ptep));
		if (!(pte & _PAGE_PRESENT)) {
			gpa += PAGE_SIZE;
			continue;
		}
		shift = PAGE_SHIFT;
	leaf:
		n = scnprintf(p->buf, sizeof(p->buf),
			      " %lx: %lx %d\n", gpa, pte, shift);
		gpa += 1ul << shift;
	copy:
		p->chars_left = n;
		if (n > len)
			n = len;
		r = copy_to_user(buf, p->buf, n);
		n -= r;
		p->chars_left -= n;
		p->buf_index = n;
		buf += n;
		len -= n;
		ret += n;
		if (r) {
			if (!ret)
				ret = -EFAULT;
			break;
		}
	}
	p->gpa = gpa;
	if (nested)
		kvmhv_put_nested(nested);

 out:
	mutex_unlock(&p->mutex);
	return ret;
}

static ssize_t debugfs_radix_write(struct file *file, const char __user *buf,
			   size_t len, loff_t *ppos)
{
	return -EACCES;
}

static const struct file_operations debugfs_radix_fops = {
	.owner	 = THIS_MODULE,
	.open	 = debugfs_radix_open,
	.release = debugfs_radix_release,
	.read	 = debugfs_radix_read,
	.write	 = debugfs_radix_write,
	.llseek	 = generic_file_llseek,
};

void kvmhv_radix_debugfs_init(struct kvm *kvm)
{
	debugfs_create_file("radix", 0400, kvm->arch.debugfs_dir, kvm,
			    &debugfs_radix_fops);
}

int kvmppc_radix_init(void)
{
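	/* A radix PTE page holds 2^RADIX_PTE_INDEX_SIZE 8-byte entries */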
	unsigned long size = sizeof(void *) << RADIX_PTE_INDEX_SIZE;

	kvm_pte_cache = kmem_cache_create("kvm-pte", size, size, 0, pte_ctor);
	if (!kvm_pte_cache)
		return -ENOMEM;

	size = sizeof(void *) << RADIX_PMD_INDEX_SIZE;

	kvm_pmd_cache = kmem_cache_create("kvm-pmd", size, size, 0, pmd_ctor);
	if (!kvm_pmd_cache) {
		kmem_cache_destroy(kvm_pte_cache);
		return -ENOMEM;
	}

	return 0;
}

void kvmppc_radix_exit(void)
{
	kmem_cache_destroy(kvm_pte_cache);
	kmem_cache_destroy(kvm_pmd_cache);
}