/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * Copyright 2016 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 */

#include <linux/types.h>
#include <linux/string.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>

#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
#include <asm/page.h>
#include <asm/mmu.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>

/*
 * Supported radix tree geometry.
 * Like p9, we support either 5 or 9 bits at the first (lowest) level,
 * for a page size of 64k or 4k.
 */
static int p9_supported_radix_bits[4] = { 5, 9, 9, 13 };

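/*
 * Translate a guest effective address by walking the guest's own
 * process-scoped radix tree in guest memory.  On success, fill in
 * *gpte with the guest real address, page size and access
 * permissions.  Returns 0, or a negative errno if the tree is
 * malformed or the translation is not present.
 */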
int kvmppc_mmu_radix_xlate(struct kvm_vcpu *vcpu, gva_t eaddr,
			   struct kvmppc_pte *gpte, bool data, bool iswrite)
{
	struct kvm *kvm = vcpu->kvm;
	u32 pid;
	int ret, level, ps;
	__be64 prte, rpte;
	unsigned long root, pte, index;
	unsigned long rts, bits, offset;
	unsigned long gpa;
	unsigned long proc_tbl_size;

	/* Work out effective PID */
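	/*
	 * The top two bits of the EA select the quadrant: quadrant 0
	 * is user space, translated with the current process PID, and
	 * quadrant 3 is kernel space, translated with PID 0.
	 * Quadrants 1 and 2 are not valid for guest translations.
	 */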
	switch (eaddr >> 62) {
	case 0:
		pid = vcpu->arch.pid;
		break;
	case 3:
		pid = 0;
		break;
	default:
		return -EINVAL;
	}
	proc_tbl_size = 1 << ((kvm->arch.process_table & PRTS_MASK) + 12);
	if (pid * 16 >= proc_tbl_size)
		return -EINVAL;

	/* Read process table to find root of tree for effective PID */
	ret = kvm_read_guest(kvm, kvm->arch.process_table + pid * 16,
			     &prte, sizeof(prte));
	if (ret)
		return ret;

	root = be64_to_cpu(prte);
	rts = ((root & RTS1_MASK) >> (RTS1_SHIFT - 3)) |
		((root & RTS2_MASK) >> RTS2_SHIFT);
	bits = root & RPDS_MASK;
	root = root & RPDB_MASK;

	/* P9 DD1 interprets RTS (radix tree size) differently */
	offset = rts + 31;
	if (cpu_has_feature(CPU_FTR_POWER9_DD1))
		offset -= 3;

	/* current implementations only support 52-bit space */
	if (offset != 52)
		return -EINVAL;

	for (level = 3; level >= 0; --level) {
		if (level && bits != p9_supported_radix_bits[level])
			return -EINVAL;
		if (level == 0 && !(bits == 5 || bits == 9))
			return -EINVAL;
		offset -= bits;
		index = (eaddr >> offset) & ((1UL << bits) - 1);
		/* check that low bits of page table base are zero */
		if (root & ((1UL << (bits + 3)) - 1))
			return -EINVAL;
		ret = kvm_read_guest(kvm, root + index * 8,
				     &rpte, sizeof(rpte));
		if (ret)
			return ret;
		pte = be64_to_cpu(rpte);
		if (!(pte & _PAGE_PRESENT))
			return -ENOENT;
		if (pte & _PAGE_PTE)
			break;
		bits = pte & 0x1f;
		root = pte & 0x0fffffffffffff00ul;
	}
	/* need a leaf at lowest level; 512GB pages not supported */
	if (level < 0 || level == 3)
		return -EINVAL;

	/* offset is now log base 2 of the page size */
	gpa = pte & 0x01fffffffffff000ul;
	if (gpa & ((1ul << offset) - 1))
		return -EINVAL;
	gpa += eaddr & ((1ul << offset) - 1);
	for (ps = MMU_PAGE_4K; ps < MMU_PAGE_COUNT; ++ps)
		if (offset == mmu_psize_defs[ps].shift)
			break;
	gpte->page_size = ps;

	gpte->eaddr = eaddr;
	gpte->raddr = gpa;

	/* Work out permissions */
	gpte->may_read = !!(pte & _PAGE_READ);
	gpte->may_write = !!(pte & _PAGE_WRITE);
	gpte->may_execute = !!(pte & _PAGE_EXEC);
	if (kvmppc_get_msr(vcpu) & MSR_PR) {
		if (pte & _PAGE_PRIVILEGED) {
			gpte->may_read = 0;
			gpte->may_write = 0;
			gpte->may_execute = 0;
		}
	} else {
		if (!(pte & _PAGE_PRIVILEGED)) {
			/* Check AMR/IAMR to see if strict mode is in force */
			if (vcpu->arch.amr & (1ul << 62))
				gpte->may_read = 0;
			if (vcpu->arch.amr & (1ul << 63))
				gpte->may_write = 0;
			if (vcpu->arch.iamr & (1ul << 62))
				gpte->may_execute = 0;
		}
	}

	return 0;
}

#ifdef CONFIG_PPC_64K_PAGES
#define MMU_BASE_PSIZE	MMU_PAGE_64K
#else
#define MMU_BASE_PSIZE	MMU_PAGE_4K
#endif

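/*
 * Flush the TLB entry for a guest real address and page size from
 * the partition-scoped translations for this guest: a tlbie with
 * RIC=0, PRS=0, R=1 targeting the guest's LPID, bracketed by
 * ptesync.
 */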
static void kvmppc_radix_tlbie_page(struct kvm *kvm, unsigned long addr,
				    unsigned int pshift)
{
	int psize = MMU_BASE_PSIZE;

	if (pshift >= PMD_SHIFT)
		psize = MMU_PAGE_2M;
	addr &= ~0xfffUL;
	addr |= mmu_psize_defs[psize].ap << 5;
	asm volatile("ptesync": : :"memory");
	asm volatile(PPC_TLBIE_5(%0, %1, 0, 0, 1)
		     : : "r" (addr), "r" (kvm->arch.lpid) : "memory");
	asm volatile("ptesync": : :"memory");
}

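/*
 * Atomically clear the bits in @clr and set the bits in @set in the
 * partition-scoped PTE at @ptep, returning the old PTE value.  On
 * POWER9 DD1, a valid PTE has to be invalidated and its TLB entry
 * flushed before its attributes can be changed, so when the caller
 * isn't clearing _PAGE_PRESENT itself we do that first and then
 * reinstate the present bit along with the requested changes.
 */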
unsigned long kvmppc_radix_update_pte(struct kvm *kvm, pte_t *ptep,
				      unsigned long clr, unsigned long set,
				      unsigned long addr, unsigned int shift)
{
	unsigned long old = 0;

	if (!(clr & _PAGE_PRESENT) && cpu_has_feature(CPU_FTR_POWER9_DD1) &&
	    pte_present(*ptep)) {
		/* have to invalidate it first */
		old = __radix_pte_update(ptep, _PAGE_PRESENT, 0);
		kvmppc_radix_tlbie_page(kvm, addr, shift);
		set |= _PAGE_PRESENT;
		old &= _PAGE_PRESENT;
	}
	return __radix_pte_update(ptep, clr, set) | old;
}

void kvmppc_radix_set_pte_at(struct kvm *kvm, unsigned long addr,
			     pte_t *ptep, pte_t pte)
{
	radix__set_pte_at(kvm->mm, addr, ptep, pte, 0);
}

static struct kmem_cache *kvm_pte_cache;

static pte_t *kvmppc_pte_alloc(void)
{
	return kmem_cache_alloc(kvm_pte_cache, GFP_KERNEL);
}

static void kvmppc_pte_free(pte_t *ptep)
{
	kmem_cache_free(kvm_pte_cache, ptep);
}

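/*
 * Install @pte in the partition-scoped (2nd-level) tree at @gpa.
 * @level selects the leaf level: 0 for a normal page PTE, 1 for a
 * 2MB PMD-level large page.  Any missing intermediate levels are
 * allocated before taking kvm->mmu_lock; @mmu_seq is then rechecked
 * under the lock and -EAGAIN returned if an MMU notifier
 * invalidation has intervened.  Returns -EBUSY if a page table is
 * already present where a large page was requested, so the caller
 * can fall back to a small page.
 */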
static int kvmppc_create_pte(struct kvm *kvm, pte_t pte, unsigned long gpa,
			     unsigned int level, unsigned long mmu_seq)
{
	pgd_t *pgd;
	pud_t *pud, *new_pud = NULL;
	pmd_t *pmd, *new_pmd = NULL;
	pte_t *ptep, *new_ptep = NULL;
	unsigned long old;
	int ret;

	/* Traverse the guest's 2nd-level tree, allocate new levels needed */
	pgd = kvm->arch.pgtable + pgd_index(gpa);
	pud = NULL;
	if (pgd_present(*pgd))
		pud = pud_offset(pgd, gpa);
	else
		new_pud = pud_alloc_one(kvm->mm, gpa);

	pmd = NULL;
	if (pud && pud_present(*pud))
		pmd = pmd_offset(pud, gpa);
	else
		new_pmd = pmd_alloc_one(kvm->mm, gpa);

	if (level == 0 && !(pmd && pmd_present(*pmd)))
		new_ptep = kvmppc_pte_alloc();

	/* Check if we might have been invalidated; let the guest retry if so */
	spin_lock(&kvm->mmu_lock);
	ret = -EAGAIN;
	if (mmu_notifier_retry(kvm, mmu_seq))
		goto out_unlock;

	/* Now traverse again under the lock and change the tree */
	ret = -ENOMEM;
	if (pgd_none(*pgd)) {
		if (!new_pud)
			goto out_unlock;
		pgd_populate(kvm->mm, pgd, new_pud);
		new_pud = NULL;
	}
	pud = pud_offset(pgd, gpa);
	if (pud_none(*pud)) {
		if (!new_pmd)
			goto out_unlock;
		pud_populate(kvm->mm, pud, new_pmd);
		new_pmd = NULL;
	}
	pmd = pmd_offset(pud, gpa);
	if (pmd_large(*pmd)) {
		/* Someone else has instantiated a large page here; retry */
		ret = -EAGAIN;
		goto out_unlock;
	}
	if (level == 1 && !pmd_none(*pmd)) {
		/*
		 * There's a page table page here, but we wanted
		 * to install a large page.  Tell the caller and let
		 * it try installing a normal page if it wants.
		 */
		ret = -EBUSY;
		goto out_unlock;
	}
	if (level == 0) {
		if (pmd_none(*pmd)) {
			if (!new_ptep)
				goto out_unlock;
			pmd_populate(kvm->mm, pmd, new_ptep);
			new_ptep = NULL;
		}
		ptep = pte_offset_kernel(pmd, gpa);
		if (pte_present(*ptep)) {
			/* PTE was previously valid, so invalidate it */
			old = kvmppc_radix_update_pte(kvm, ptep, _PAGE_PRESENT,
						      0, gpa, 0);
			kvmppc_radix_tlbie_page(kvm, gpa, 0);
			if (old & _PAGE_DIRTY)
				mark_page_dirty(kvm, gpa >> PAGE_SHIFT);
		}
		kvmppc_radix_set_pte_at(kvm, gpa, ptep, pte);
	} else {
		kvmppc_radix_set_pte_at(kvm, gpa, pmdp_ptep(pmd), pte);
	}
	ret = 0;

 out_unlock:
	spin_unlock(&kvm->mmu_lock);
	if (new_pud)
		pud_free(kvm->mm, new_pud);
	if (new_pmd)
		pmd_free(kvm->mm, new_pmd);
	if (new_ptep)
		kvmppc_pte_free(new_ptep);
	return ret;
}

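/*
 * Handle a hypervisor page fault taken while running a radix guest:
 * check for errors that should be reflected to the guest as a DSI,
 * translate the faulting guest real address to a host page (or
 * treat the access as emulated MMIO if no memslot covers it), and
 * install a partition-scoped PTE mapping it.
 */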
int kvmppc_book3s_radix_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu,
				   unsigned long ea, unsigned long dsisr)
{
	struct kvm *kvm = vcpu->kvm;
	unsigned long mmu_seq, pte_size;
	unsigned long gpa, gfn, hva, pfn;
	struct kvm_memory_slot *memslot;
	struct page *page = NULL, *pages[1];
	long ret, npages, ok;
	unsigned int writing;
	struct vm_area_struct *vma;
	unsigned long flags;
	pte_t pte, *ptep;
	unsigned long pgflags;
	unsigned int shift, level;

	/* Check for unusual errors */
	if (dsisr & DSISR_UNSUPP_MMU) {
		pr_err("KVM: Got unsupported MMU fault\n");
		return -EFAULT;
	}
	if (dsisr & DSISR_BADACCESS) {
		/* Reflect to the guest as DSI */
		pr_err("KVM: Got radix HV page fault with DSISR=%lx\n", dsisr);
		kvmppc_core_queue_data_storage(vcpu, ea, dsisr);
		return RESUME_GUEST;
	}

	/* Translate the logical address and get the page */
	gpa = vcpu->arch.fault_gpa & ~0xfffUL;
	gpa &= ~0xF000000000000000ul;
	gfn = gpa >> PAGE_SHIFT;
	if (!(dsisr & DSISR_PGDIRFAULT))
		gpa |= ea & 0xfff;
	memslot = gfn_to_memslot(kvm, gfn);

	/* No memslot means it's an emulated MMIO region */
	if (!memslot || (memslot->flags & KVM_MEMSLOT_INVALID)) {
		if (dsisr & (DSISR_PGDIRFAULT | DSISR_BADACCESS |
			     DSISR_SET_RC)) {
			/*
			 * Bad address in guest page table tree, or other
			 * unusual error - reflect it to the guest as DSI.
			 */
			kvmppc_core_queue_data_storage(vcpu, ea, dsisr);
			return RESUME_GUEST;
		}
		return kvmppc_hv_emulate_mmio(run, vcpu, gpa, ea,
					      dsisr & DSISR_ISSTORE);
	}

	/* used to check for invalidations in progress */
	mmu_seq = kvm->mmu_notifier_seq;
	smp_rmb();

	writing = (dsisr & DSISR_ISSTORE) != 0;
	hva = gfn_to_hva_memslot(memslot, gfn);
	if (dsisr & DSISR_SET_RC) {
		/*
		 * Need to set an R or C bit in the 2nd-level tables;
		 * if the relevant bits aren't already set in the linux
		 * page tables, fall through to do the gup_fast to
		 * set them in the linux page tables too.
		 */
		ok = 0;
		pgflags = _PAGE_ACCESSED;
		if (writing)
			pgflags |= _PAGE_DIRTY;
		local_irq_save(flags);
		ptep = __find_linux_pte_or_hugepte(current->mm->pgd, hva,
						   NULL, NULL);
		if (ptep) {
			pte = READ_ONCE(*ptep);
			if (pte_present(pte) &&
			    (pte_val(pte) & pgflags) == pgflags)
				ok = 1;
		}
		local_irq_restore(flags);
		if (ok) {
			spin_lock(&kvm->mmu_lock);
			if (mmu_notifier_retry(vcpu->kvm, mmu_seq)) {
				spin_unlock(&kvm->mmu_lock);
				return RESUME_GUEST;
			}
			ptep = __find_linux_pte_or_hugepte(kvm->arch.pgtable,
							gpa, NULL, &shift);
			if (ptep && pte_present(*ptep)) {
				kvmppc_radix_update_pte(kvm, ptep, 0, pgflags,
							gpa, shift);
				spin_unlock(&kvm->mmu_lock);
				return RESUME_GUEST;
			}
			spin_unlock(&kvm->mmu_lock);
		}
	}

	ret = -EFAULT;
	pfn = 0;
	pte_size = PAGE_SIZE;
	pgflags = _PAGE_READ | _PAGE_EXEC;
	level = 0;
	npages = get_user_pages_fast(hva, 1, writing, pages);
	if (npages < 1) {
		/* Check if it's an I/O mapping */
		down_read(&current->mm->mmap_sem);
		vma = find_vma(current->mm, hva);
		if (vma && vma->vm_start <= hva && hva < vma->vm_end &&
		    (vma->vm_flags & VM_PFNMAP)) {
			pfn = vma->vm_pgoff +
				((hva - vma->vm_start) >> PAGE_SHIFT);
			pgflags = pgprot_val(vma->vm_page_prot);
		}
		up_read(&current->mm->mmap_sem);
		if (!pfn)
			return -EFAULT;
	} else {
		page = pages[0];
		pfn = page_to_pfn(page);
		if (PageHuge(page)) {
			page = compound_head(page);
			pte_size <<= compound_order(page);
			/* See if we can insert a 2MB large-page PTE here */
			if (pte_size >= PMD_SIZE &&
			    (gpa & PMD_MASK & PAGE_MASK) ==
			    (hva & PMD_MASK & PAGE_MASK)) {
				level = 1;
				pfn &= ~((PMD_SIZE >> PAGE_SHIFT) - 1);
			}
		}
		/* See if we can provide write access */
		if (writing) {
			/*
			 * We assume gup_fast has set dirty on the host PTE.
			 */
			pgflags |= _PAGE_WRITE;
		} else {
			local_irq_save(flags);
			ptep = __find_linux_pte_or_hugepte(current->mm->pgd,
							hva, NULL, NULL);
			if (ptep && pte_write(*ptep) && pte_dirty(*ptep))
				pgflags |= _PAGE_WRITE;
			local_irq_restore(flags);
		}
	}

	/*
	 * Compute the PTE value that we need to insert.
	 */
	pgflags |= _PAGE_PRESENT | _PAGE_PTE | _PAGE_ACCESSED;
	if (pgflags & _PAGE_WRITE)
		pgflags |= _PAGE_DIRTY;
	pte = pfn_pte(pfn, __pgprot(pgflags));

	/* Allocate space in the tree and write the PTE */
	ret = kvmppc_create_pte(kvm, pte, gpa, level, mmu_seq);
	if (ret == -EBUSY) {
		/*
		 * There's already a PMD where we wanted to install a large
		 * page; for now, fall back to installing a small page.
		 */
		level = 0;
		pfn |= gfn & ((PMD_SIZE >> PAGE_SHIFT) - 1);
		pte = pfn_pte(pfn, __pgprot(pgflags));
		ret = kvmppc_create_pte(kvm, pte, gpa, level, mmu_seq);
	}
	if (ret == 0 || ret == -EAGAIN)
		ret = RESUME_GUEST;

	if (page) {
		/*
		 * Drop the reference on pages[0], not page, because page
		 * may have been set to the head page of a compound page;
		 * we have to drop the reference on the same tail page
		 * that gup() originally took.
		 */
		put_page(pages[0]);
	}
	return ret;
}

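/*
 * Mark the 2^order guest pages starting at @gfn dirty in the
 * memslot's dirty bitmap.  For ranges of at least BITS_PER_LONG
 * pages the bitmap words are set wholesale, relying on huge pages
 * being naturally aligned so that @gfn starts on a word boundary.
 */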
static void mark_pages_dirty(struct kvm *kvm, struct kvm_memory_slot *memslot,
			     unsigned long gfn, unsigned int order)
{
	unsigned long i, limit;
	unsigned long *dp;

	if (!memslot->dirty_bitmap)
		return;
	limit = 1ul << order;
	if (limit < BITS_PER_LONG) {
		for (i = 0; i < limit; ++i)
			mark_page_dirty(kvm, gfn + i);
		return;
	}
	dp = memslot->dirty_bitmap + (gfn - memslot->base_gfn) / BITS_PER_LONG;
	limit /= BITS_PER_LONG;
	for (i = 0; i < limit; ++i)
		*dp++ = ~0ul;
}

/* Called with kvm->lock held */
int kvm_unmap_radix(struct kvm *kvm, struct kvm_memory_slot *memslot,
		    unsigned long gfn)
{
	pte_t *ptep;
	unsigned long gpa = gfn << PAGE_SHIFT;
	unsigned int shift;
	unsigned long old;

	ptep = __find_linux_pte_or_hugepte(kvm->arch.pgtable, gpa,
					   NULL, &shift);
	if (ptep && pte_present(*ptep)) {
		old = kvmppc_radix_update_pte(kvm, ptep, _PAGE_PRESENT, 0,
					      gpa, shift);
		kvmppc_radix_tlbie_page(kvm, gpa, shift);
		if (old & _PAGE_DIRTY) {
			if (!shift)
				mark_page_dirty(kvm, gfn);
			else
				mark_pages_dirty(kvm, memslot,
						 gfn, shift - PAGE_SHIFT);
		}
	}
	return 0;
}

/* Called with kvm->lock held */
int kvm_age_radix(struct kvm *kvm, struct kvm_memory_slot *memslot,
		  unsigned long gfn)
{
	pte_t *ptep;
	unsigned long gpa = gfn << PAGE_SHIFT;
	unsigned int shift;
	int ref = 0;

	ptep = __find_linux_pte_or_hugepte(kvm->arch.pgtable, gpa,
					   NULL, &shift);
	if (ptep && pte_present(*ptep) && pte_young(*ptep)) {
		kvmppc_radix_update_pte(kvm, ptep, _PAGE_ACCESSED, 0,
					gpa, shift);
		/* XXX need to flush tlb here? */
		ref = 1;
	}
	return ref;
}

/* Called with kvm->lock held */
int kvm_test_age_radix(struct kvm *kvm, struct kvm_memory_slot *memslot,
		       unsigned long gfn)
{
	pte_t *ptep;
	unsigned long gpa = gfn << PAGE_SHIFT;
	unsigned int shift;
	int ref = 0;

	ptep = __find_linux_pte_or_hugepte(kvm->arch.pgtable, gpa,
					   NULL, &shift);
	if (ptep && pte_present(*ptep) && pte_young(*ptep))
		ref = 1;
	return ref;
}

/* Returns the number of PAGE_SIZE pages that are dirty */
static int kvm_radix_test_clear_dirty(struct kvm *kvm,
				struct kvm_memory_slot *memslot, int pagenum)
{
	unsigned long gfn = memslot->base_gfn + pagenum;
	unsigned long gpa = gfn << PAGE_SHIFT;
	pte_t *ptep;
	unsigned int shift;
	int ret = 0;

	ptep = __find_linux_pte_or_hugepte(kvm->arch.pgtable, gpa,
					   NULL, &shift);
	if (ptep && pte_present(*ptep) && pte_dirty(*ptep)) {
		ret = 1;
		if (shift)
			ret = 1 << (shift - PAGE_SHIFT);
		kvmppc_radix_update_pte(kvm, ptep, _PAGE_DIRTY, 0,
					gpa, shift);
		kvmppc_radix_tlbie_page(kvm, gpa, shift);
	}
	return ret;
}

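/*
 * Harvest the dirty state of @memslot into @map: fold in the bits
 * accumulated in the memslot's own bitmap (set when pages are paged
 * out or written by the host), then scan the partition-scoped tree,
 * clearing _PAGE_DIRTY from each dirty PTE and setting the
 * corresponding bits in @map.
 */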
long kvmppc_hv_get_dirty_log_radix(struct kvm *kvm,
			struct kvm_memory_slot *memslot, unsigned long *map)
{
	unsigned long i, j;
	unsigned long n, *p;
	int npages;

	/*
	 * Radix accumulates dirty bits in the first half of the
	 * memslot's dirty_bitmap area, for when pages are paged
	 * out or modified by the host directly.  Pick up these
	 * bits and add them to the map.
	 */
	n = kvm_dirty_bitmap_bytes(memslot) / sizeof(long);
	p = memslot->dirty_bitmap;
	for (i = 0; i < n; ++i)
		map[i] |= xchg(&p[i], 0);

	for (i = 0; i < memslot->npages; i = j) {
		npages = kvm_radix_test_clear_dirty(kvm, memslot, i);

		/*
		 * Note that if npages > 0 then i must be a multiple of npages,
		 * since huge pages are only used to back the guest at guest
		 * real addresses that are a multiple of their size.
		 * Since we have at most one PTE covering any given guest
		 * real address, if npages > 1 we can skip to i + npages.
		 */
		j = i + 1;
		if (npages)
			for (j = i; npages; ++j, --npages)
				__set_bit_le(j, map);
	}
	return 0;
}

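/*
 * Pack one supported page size into info->ap_encodings[] in the
 * format used by the KVM_PPC_GET_RMMU_INFO ioctl: the page shift in
 * the low bits and the hardware "actual page size" (AP) field at
 * bits 29-31.  Userspace fetches this with, roughly,
 * ioctl(vm_fd, KVM_PPC_GET_RMMU_INFO, &info).
 */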
static void add_rmmu_ap_encoding(struct kvm_ppc_rmmu_info *info,
				 int psize, int *indexp)
{
	if (!mmu_psize_defs[psize].shift)
		return;
	info->ap_encodings[*indexp] = mmu_psize_defs[psize].shift |
		(mmu_psize_defs[psize].ap << 29);
	++(*indexp);
}

int kvmhv_get_rmmu_info(struct kvm *kvm, struct kvm_ppc_rmmu_info *info)
{
	int i;

	if (!radix_enabled())
		return -EINVAL;
	memset(info, 0, sizeof(*info));

	/* 4k page size */
	info->geometries[0].page_shift = 12;
	info->geometries[0].level_bits[0] = 9;
	for (i = 1; i < 4; ++i)
		info->geometries[0].level_bits[i] = p9_supported_radix_bits[i];
	/* 64k page size */
	info->geometries[1].page_shift = 16;
	for (i = 0; i < 4; ++i)
		info->geometries[1].level_bits[i] = p9_supported_radix_bits[i];

	i = 0;
	add_rmmu_ap_encoding(info, MMU_PAGE_4K, &i);
	add_rmmu_ap_encoding(info, MMU_PAGE_64K, &i);
	add_rmmu_ap_encoding(info, MMU_PAGE_2M, &i);
	add_rmmu_ap_encoding(info, MMU_PAGE_1G, &i);

	return 0;
}

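/*
 * Allocate the root page directory of the partition-scoped radix
 * tree when a VM starts using the radix MMU.
 */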
int kvmppc_init_vm_radix(struct kvm *kvm)
{
	kvm->arch.pgtable = pgd_alloc(kvm->mm);
	if (!kvm->arch.pgtable)
		return -ENOMEM;
	return 0;
}

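/*
 * Free the entire partition-scoped radix tree: clear any huge-page
 * PMDs, return PTE pages to the kvm-pte cache, and free the PMD,
 * PUD and PGD levels.  No other users of the tree may remain when
 * this is called.
 */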
void kvmppc_free_radix(struct kvm *kvm)
{
	unsigned long ig, iu, im;
	pte_t *pte;
	pmd_t *pmd;
	pud_t *pud;
	pgd_t *pgd;

	if (!kvm->arch.pgtable)
		return;
	pgd = kvm->arch.pgtable;
	for (ig = 0; ig < PTRS_PER_PGD; ++ig, ++pgd) {
		if (!pgd_present(*pgd))
			continue;
		pud = pud_offset(pgd, 0);
		for (iu = 0; iu < PTRS_PER_PUD; ++iu, ++pud) {
			if (!pud_present(*pud))
				continue;
			pmd = pmd_offset(pud, 0);
			for (im = 0; im < PTRS_PER_PMD; ++im, ++pmd) {
				if (pmd_huge(*pmd)) {
					pmd_clear(pmd);
					continue;
				}
				if (!pmd_present(*pmd))
					continue;
				pte = pte_offset_map(pmd, 0);
				memset(pte, 0, sizeof(long) << PTE_INDEX_SIZE);
				kvmppc_pte_free(pte);
				pmd_clear(pmd);
			}
			pmd_free(kvm->mm, pmd_offset(pud, 0));
			pud_clear(pud);
		}
		pud_free(kvm->mm, pud_offset(pgd, 0));
		pgd_clear(pgd);
	}
	pgd_free(kvm->mm, kvm->arch.pgtable);
}

static void pte_ctor(void *addr)
{
	memset(addr, 0, PTE_TABLE_SIZE);
}

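/*
 * Module init/exit: create and destroy the kmem cache backing the
 * guest PTE-level page-table pages.  The constructor zeroes objects
 * when a slab page is first populated; kvmppc_free_radix() clears
 * PTE pages again before freeing them, since slab constructors do
 * not run again on reallocation.
 */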
int kvmppc_radix_init(void)
{
	unsigned long size = sizeof(void *) << PTE_INDEX_SIZE;

	kvm_pte_cache = kmem_cache_create("kvm-pte", size, size, 0, pte_ctor);
	if (!kvm_pte_cache)
		return -ENOMEM;
	return 0;
}

void kvmppc_radix_exit(void)
{
	kmem_cache_destroy(kvm_pte_cache);
}