1 /*
2  * This program is free software; you can redistribute it and/or modify
3  * it under the terms of the GNU General Public License, version 2, as
4  * published by the Free Software Foundation.
5  *
6  * Copyright 2016 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
7  */
8 
9 #include <linux/types.h>
10 #include <linux/string.h>
11 #include <linux/kvm.h>
12 #include <linux/kvm_host.h>
13 
14 #include <asm/kvm_ppc.h>
15 #include <asm/kvm_book3s.h>
16 #include <asm/page.h>
17 #include <asm/mmu.h>
18 #include <asm/pgtable.h>
19 #include <asm/pgalloc.h>
20 #include <asm/pte-walk.h>
21 
22 /*
23  * Supported radix tree geometry.
 * Like POWER9, we support either 5 or 9 bits at the first (lowest) level,
 * giving a page size of 64k or 4k respectively.
26  */
27 static int p9_supported_radix_bits[4] = { 5, 9, 9, 13 };
28 
29 int kvmppc_mmu_radix_xlate(struct kvm_vcpu *vcpu, gva_t eaddr,
30 			   struct kvmppc_pte *gpte, bool data, bool iswrite)
31 {
32 	struct kvm *kvm = vcpu->kvm;
33 	u32 pid;
34 	int ret, level, ps;
35 	__be64 prte, rpte;
36 	unsigned long ptbl;
37 	unsigned long root, pte, index;
38 	unsigned long rts, bits, offset;
39 	unsigned long gpa;
40 	unsigned long proc_tbl_size;
41 
42 	/* Work out effective PID */
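	/* Quadrant 0 uses the current PID; quadrant 3 uses PID 0 */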
43 	switch (eaddr >> 62) {
44 	case 0:
45 		pid = vcpu->arch.pid;
46 		break;
47 	case 3:
48 		pid = 0;
49 		break;
50 	default:
51 		return -EINVAL;
52 	}
53 	proc_tbl_size = 1 << ((kvm->arch.process_table & PRTS_MASK) + 12);
54 	if (pid * 16 >= proc_tbl_size)
55 		return -EINVAL;
56 
57 	/* Read partition table to find root of tree for effective PID */
58 	ptbl = (kvm->arch.process_table & PRTB_MASK) + (pid * 16);
59 	ret = kvm_read_guest(kvm, ptbl, &prte, sizeof(prte));
60 	if (ret)
61 		return ret;
62 
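	/*
	 * Decode the process-table entry: RTS gives the radix tree size
	 * (the address space is 2^(RTS + 31) bytes), RPDS the number of
	 * index bits at the root level, and RPDB the root table base.
	 */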
63 	root = be64_to_cpu(prte);
64 	rts = ((root & RTS1_MASK) >> (RTS1_SHIFT - 3)) |
65 		((root & RTS2_MASK) >> RTS2_SHIFT);
66 	bits = root & RPDS_MASK;
67 	root = root & RPDB_MASK;
68 
69 	offset = rts + 31;
70 
71 	/* current implementations only support 52-bit space */
72 	if (offset != 52)
73 		return -EINVAL;
74 
75 	for (level = 3; level >= 0; --level) {
76 		if (level && bits != p9_supported_radix_bits[level])
77 			return -EINVAL;
78 		if (level == 0 && !(bits == 5 || bits == 9))
79 			return -EINVAL;
80 		offset -= bits;
81 		index = (eaddr >> offset) & ((1UL << bits) - 1);
82 		/* check that low bits of page table base are zero */
83 		if (root & ((1UL << (bits + 3)) - 1))
84 			return -EINVAL;
85 		ret = kvm_read_guest(kvm, root + index * 8,
86 				     &rpte, sizeof(rpte));
87 		if (ret)
88 			return ret;
89 		pte = __be64_to_cpu(rpte);
90 		if (!(pte & _PAGE_PRESENT))
91 			return -ENOENT;
92 		if (pte & _PAGE_PTE)
93 			break;
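		/* Non-leaf: extract the next-level size (NLS) and base (NLB) */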
94 		bits = pte & 0x1f;
95 		root = pte & 0x0fffffffffffff00ul;
96 	}
97 	/* need a leaf at lowest level; 512GB pages not supported */
98 	if (level < 0 || level == 3)
99 		return -EINVAL;
100 
101 	/* offset is now log base 2 of the page size */
102 	gpa = pte & 0x01fffffffffff000ul;
103 	if (gpa & ((1ul << offset) - 1))
104 		return -EINVAL;
105 	gpa += eaddr & ((1ul << offset) - 1);
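	/* Map the page shift to the corresponding MMU_PAGE_* size index */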
106 	for (ps = MMU_PAGE_4K; ps < MMU_PAGE_COUNT; ++ps)
107 		if (offset == mmu_psize_defs[ps].shift)
108 			break;
109 	gpte->page_size = ps;
110 
111 	gpte->eaddr = eaddr;
112 	gpte->raddr = gpa;
113 
114 	/* Work out permissions */
115 	gpte->may_read = !!(pte & _PAGE_READ);
116 	gpte->may_write = !!(pte & _PAGE_WRITE);
117 	gpte->may_execute = !!(pte & _PAGE_EXEC);
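	/* In problem state (MSR_PR), deny all access to privileged pages */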
118 	if (kvmppc_get_msr(vcpu) & MSR_PR) {
119 		if (pte & _PAGE_PRIVILEGED) {
120 			gpte->may_read = 0;
121 			gpte->may_write = 0;
122 			gpte->may_execute = 0;
123 		}
124 	} else {
125 		if (!(pte & _PAGE_PRIVILEGED)) {
126 			/* Check AMR/IAMR to see if strict mode is in force */
127 			if (vcpu->arch.amr & (1ul << 62))
128 				gpte->may_read = 0;
129 			if (vcpu->arch.amr & (1ul << 63))
130 				gpte->may_write = 0;
131 			if (vcpu->arch.iamr & (1ul << 62))
132 				gpte->may_execute = 0;
133 		}
134 	}
135 
136 	return 0;
137 }
138 
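/* Flush the TLB entries covering guest real address @addr for this LPID */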
139 static void kvmppc_radix_tlbie_page(struct kvm *kvm, unsigned long addr,
140 				    unsigned int pshift)
141 {
142 	unsigned long psize = PAGE_SIZE;
143 
144 	if (pshift)
145 		psize = 1UL << pshift;
146 
147 	addr &= ~(psize - 1);
148 	radix__flush_tlb_lpid_page(kvm->arch.lpid, addr, psize);
149 }
150 
151 static void kvmppc_radix_flush_pwc(struct kvm *kvm)
152 {
153 	radix__flush_pwc_lpid(kvm->arch.lpid);
154 }
155 
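/*
 * Atomically clear and set bits in a pte of the partition-scoped tree,
 * returning the old pte value.
 */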
156 static unsigned long kvmppc_radix_update_pte(struct kvm *kvm, pte_t *ptep,
157 				      unsigned long clr, unsigned long set,
158 				      unsigned long addr, unsigned int shift)
159 {
160 	return __radix_pte_update(ptep, clr, set);
161 }
162 
163 void kvmppc_radix_set_pte_at(struct kvm *kvm, unsigned long addr,
164 			     pte_t *ptep, pte_t pte)
165 {
166 	radix__set_pte_at(kvm->mm, addr, ptep, pte, 0);
167 }
168 
169 static struct kmem_cache *kvm_pte_cache;
170 static struct kmem_cache *kvm_pmd_cache;
171 
172 static pte_t *kvmppc_pte_alloc(void)
173 {
174 	return kmem_cache_alloc(kvm_pte_cache, GFP_KERNEL);
175 }
176 
177 static void kvmppc_pte_free(pte_t *ptep)
178 {
179 	kmem_cache_free(kvm_pte_cache, ptep);
180 }
181 
182 /* Like pmd_huge() and pmd_large(), but works regardless of config options */
183 static inline int pmd_is_leaf(pmd_t pmd)
184 {
185 	return !!(pmd_val(pmd) & _PAGE_PTE);
186 }
187 
188 static pmd_t *kvmppc_pmd_alloc(void)
189 {
190 	return kmem_cache_alloc(kvm_pmd_cache, GFP_KERNEL);
191 }
192 
193 static void kvmppc_pmd_free(pmd_t *pmdp)
194 {
195 	kmem_cache_free(kvm_pmd_cache, pmdp);
196 }
197 
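/*
 * Clear a pte in the partition-scoped tree, flush the corresponding TLB
 * entries, and transfer any dirty bit to the memslot's dirty bitmap.
 */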
198 static void kvmppc_unmap_pte(struct kvm *kvm, pte_t *pte,
			     unsigned long gpa, unsigned int shift)
{
202 	unsigned long page_size = 1ul << shift;
203 	unsigned long old;
204 
205 	old = kvmppc_radix_update_pte(kvm, pte, ~0UL, 0, gpa, shift);
206 	kvmppc_radix_tlbie_page(kvm, gpa, shift);
207 	if (old & _PAGE_DIRTY) {
208 		unsigned long gfn = gpa >> PAGE_SHIFT;
209 		struct kvm_memory_slot *memslot;
210 
211 		memslot = gfn_to_memslot(kvm, gfn);
212 		if (memslot && memslot->dirty_bitmap)
213 			kvmppc_update_dirty_map(memslot, gfn, page_size);
214 	}
215 }
216 
217 /*
 * The kvmppc_unmap_free_* functions below free existing page tables,
 * recursively descending to clear and free any child tables.
 * Callers are responsible for flushing the PWC.
 *
 * When page tables are being unmapped/freed as part of the page fault
 * path (full == false), valid ptes are not expected; there is code to
 * unmap them and emit a warning if any are encountered, but by then data
 * corruption may already have occurred due to the unexpected mappings.
226  */
227 static void kvmppc_unmap_free_pte(struct kvm *kvm, pte_t *pte, bool full)
228 {
229 	if (full) {
230 		memset(pte, 0, sizeof(long) << PTE_INDEX_SIZE);
231 	} else {
232 		pte_t *p = pte;
233 		unsigned long it;
234 
235 		for (it = 0; it < PTRS_PER_PTE; ++it, ++p) {
236 			if (pte_val(*p) == 0)
237 				continue;
238 			WARN_ON_ONCE(1);
239 			kvmppc_unmap_pte(kvm, p,
240 					 pte_pfn(*p) << PAGE_SHIFT,
241 					 PAGE_SHIFT);
242 		}
243 	}
244 
245 	kvmppc_pte_free(pte);
246 }
247 
248 static void kvmppc_unmap_free_pmd(struct kvm *kvm, pmd_t *pmd, bool full)
249 {
250 	unsigned long im;
251 	pmd_t *p = pmd;
252 
253 	for (im = 0; im < PTRS_PER_PMD; ++im, ++p) {
254 		if (!pmd_present(*p))
255 			continue;
256 		if (pmd_is_leaf(*p)) {
257 			if (full) {
258 				pmd_clear(p);
259 			} else {
260 				WARN_ON_ONCE(1);
261 				kvmppc_unmap_pte(kvm, (pte_t *)p,
262 					 pte_pfn(*(pte_t *)p) << PAGE_SHIFT,
263 					 PMD_SHIFT);
264 			}
265 		} else {
266 			pte_t *pte;
267 
268 			pte = pte_offset_map(p, 0);
269 			kvmppc_unmap_free_pte(kvm, pte, full);
270 			pmd_clear(p);
271 		}
272 	}
273 	kvmppc_pmd_free(pmd);
274 }
275 
276 static void kvmppc_unmap_free_pud(struct kvm *kvm, pud_t *pud)
277 {
278 	unsigned long iu;
279 	pud_t *p = pud;
280 
281 	for (iu = 0; iu < PTRS_PER_PUD; ++iu, ++p) {
282 		if (!pud_present(*p))
283 			continue;
284 		if (pud_huge(*p)) {
285 			pud_clear(p);
286 		} else {
287 			pmd_t *pmd;
288 
289 			pmd = pmd_offset(p, 0);
290 			kvmppc_unmap_free_pmd(kvm, pmd, true);
291 			pud_clear(p);
292 		}
293 	}
294 	pud_free(kvm->mm, pud);
295 }
296 
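/* Free the entire partition-scoped tree for a guest */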
297 void kvmppc_free_radix(struct kvm *kvm)
298 {
299 	unsigned long ig;
300 	pgd_t *pgd;
301 
302 	if (!kvm->arch.pgtable)
303 		return;
304 	pgd = kvm->arch.pgtable;
305 	for (ig = 0; ig < PTRS_PER_PGD; ++ig, ++pgd) {
306 		pud_t *pud;
307 
308 		if (!pgd_present(*pgd))
309 			continue;
310 		pud = pud_offset(pgd, 0);
311 		kvmppc_unmap_free_pud(kvm, pud);
312 		pgd_clear(pgd);
313 	}
314 	pgd_free(kvm->mm, kvm->arch.pgtable);
315 	kvm->arch.pgtable = NULL;
316 }
317 
318 static void kvmppc_unmap_free_pmd_entry_table(struct kvm *kvm, pmd_t *pmd,
319 					      unsigned long gpa)
320 {
321 	pte_t *pte = pte_offset_kernel(pmd, 0);
322 
323 	/*
	 * Clearing the pmd entry then flushing the PWC ensures that the pte
	 * page will no longer be cached by the MMU, so it can be freed
	 * without flushing the PWC again.
327 	 */
328 	pmd_clear(pmd);
329 	kvmppc_radix_flush_pwc(kvm);
330 
331 	kvmppc_unmap_free_pte(kvm, pte, false);
332 }
333 
334 static void kvmppc_unmap_free_pud_entry_table(struct kvm *kvm, pud_t *pud,
335 					unsigned long gpa)
336 {
337 	pmd_t *pmd = pmd_offset(pud, 0);
338 
339 	/*
	 * Clearing the pud entry then flushing the PWC ensures that the pmd
	 * page and any child pte pages will no longer be cached by the MMU,
	 * so they can be freed without flushing the PWC again.
343 	 */
344 	pud_clear(pud);
345 	kvmppc_radix_flush_pwc(kvm);
346 
347 	kvmppc_unmap_free_pmd(kvm, pmd, false);
348 }
349 
350 /*
 * A number of bits may differ between faults on the same partition-scope
 * entry: the R and C bits change in the course of cleaning and aging, and
 * the write bit can change because either the access was upgraded or a
 * read fault raced with a write fault that set those bits first.
356  */
357 #define PTE_BITS_MUST_MATCH (~(_PAGE_WRITE | _PAGE_DIRTY | _PAGE_ACCESSED))
358 
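/*
 * Insert a pte into the partition-scoped tree at the given level:
 * level 0 is a PAGE_SIZE pte, level 1 a 2MB (pmd-level) pte, and
 * level 2 a 1GB (pud-level) pte.
 */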
359 static int kvmppc_create_pte(struct kvm *kvm, pte_t pte, unsigned long gpa,
360 			     unsigned int level, unsigned long mmu_seq)
361 {
362 	pgd_t *pgd;
363 	pud_t *pud, *new_pud = NULL;
364 	pmd_t *pmd, *new_pmd = NULL;
365 	pte_t *ptep, *new_ptep = NULL;
366 	int ret;
367 
	/* Traverse the guest's 2nd-level tree, allocating new levels as needed */
369 	pgd = kvm->arch.pgtable + pgd_index(gpa);
370 	pud = NULL;
371 	if (pgd_present(*pgd))
372 		pud = pud_offset(pgd, gpa);
373 	else
374 		new_pud = pud_alloc_one(kvm->mm, gpa);
375 
376 	pmd = NULL;
377 	if (pud && pud_present(*pud) && !pud_huge(*pud))
378 		pmd = pmd_offset(pud, gpa);
379 	else if (level <= 1)
380 		new_pmd = kvmppc_pmd_alloc();
381 
382 	if (level == 0 && !(pmd && pmd_present(*pmd) && !pmd_is_leaf(*pmd)))
383 		new_ptep = kvmppc_pte_alloc();
384 
385 	/* Check if we might have been invalidated; let the guest retry if so */
386 	spin_lock(&kvm->mmu_lock);
387 	ret = -EAGAIN;
388 	if (mmu_notifier_retry(kvm, mmu_seq))
389 		goto out_unlock;
390 
391 	/* Now traverse again under the lock and change the tree */
392 	ret = -ENOMEM;
393 	if (pgd_none(*pgd)) {
394 		if (!new_pud)
395 			goto out_unlock;
396 		pgd_populate(kvm->mm, pgd, new_pud);
397 		new_pud = NULL;
398 	}
399 	pud = pud_offset(pgd, gpa);
400 	if (pud_huge(*pud)) {
401 		unsigned long hgpa = gpa & PUD_MASK;
402 
403 		/* Check if we raced and someone else has set the same thing */
404 		if (level == 2) {
405 			if (pud_raw(*pud) == pte_raw(pte)) {
406 				ret = 0;
407 				goto out_unlock;
408 			}
409 			/* Valid 1GB page here already, add our extra bits */
410 			WARN_ON_ONCE((pud_val(*pud) ^ pte_val(pte)) &
411 							PTE_BITS_MUST_MATCH);
412 			kvmppc_radix_update_pte(kvm, (pte_t *)pud,
413 					      0, pte_val(pte), hgpa, PUD_SHIFT);
414 			ret = 0;
415 			goto out_unlock;
416 		}
417 		/*
418 		 * If we raced with another CPU which has just put
419 		 * a 1GB pte in after we saw a pmd page, try again.
420 		 */
421 		if (!new_pmd) {
422 			ret = -EAGAIN;
423 			goto out_unlock;
424 		}
425 		/* Valid 1GB page here already, remove it */
426 		kvmppc_unmap_pte(kvm, (pte_t *)pud, hgpa, PUD_SHIFT);
427 	}
428 	if (level == 2) {
429 		if (!pud_none(*pud)) {
430 			/*
431 			 * There's a page table page here, but we wanted to
432 			 * install a large page, so remove and free the page
433 			 * table page.
434 			 */
435 			kvmppc_unmap_free_pud_entry_table(kvm, pud, gpa);
436 		}
437 		kvmppc_radix_set_pte_at(kvm, gpa, (pte_t *)pud, pte);
438 		ret = 0;
439 		goto out_unlock;
440 	}
441 	if (pud_none(*pud)) {
442 		if (!new_pmd)
443 			goto out_unlock;
444 		pud_populate(kvm->mm, pud, new_pmd);
445 		new_pmd = NULL;
446 	}
447 	pmd = pmd_offset(pud, gpa);
448 	if (pmd_is_leaf(*pmd)) {
449 		unsigned long lgpa = gpa & PMD_MASK;
450 
451 		/* Check if we raced and someone else has set the same thing */
452 		if (level == 1) {
453 			if (pmd_raw(*pmd) == pte_raw(pte)) {
454 				ret = 0;
455 				goto out_unlock;
456 			}
457 			/* Valid 2MB page here already, add our extra bits */
458 			WARN_ON_ONCE((pmd_val(*pmd) ^ pte_val(pte)) &
459 							PTE_BITS_MUST_MATCH);
460 			kvmppc_radix_update_pte(kvm, pmdp_ptep(pmd),
461 					      0, pte_val(pte), lgpa, PMD_SHIFT);
462 			ret = 0;
463 			goto out_unlock;
464 		}
465 
466 		/*
467 		 * If we raced with another CPU which has just put
468 		 * a 2MB pte in after we saw a pte page, try again.
469 		 */
470 		if (!new_ptep) {
471 			ret = -EAGAIN;
472 			goto out_unlock;
473 		}
474 		/* Valid 2MB page here already, remove it */
475 		kvmppc_unmap_pte(kvm, pmdp_ptep(pmd), lgpa, PMD_SHIFT);
476 	}
477 	if (level == 1) {
478 		if (!pmd_none(*pmd)) {
479 			/*
480 			 * There's a page table page here, but we wanted to
481 			 * install a large page, so remove and free the page
482 			 * table page.
483 			 */
484 			kvmppc_unmap_free_pmd_entry_table(kvm, pmd, gpa);
485 		}
486 		kvmppc_radix_set_pte_at(kvm, gpa, pmdp_ptep(pmd), pte);
487 		ret = 0;
488 		goto out_unlock;
489 	}
490 	if (pmd_none(*pmd)) {
491 		if (!new_ptep)
492 			goto out_unlock;
493 		pmd_populate(kvm->mm, pmd, new_ptep);
494 		new_ptep = NULL;
495 	}
496 	ptep = pte_offset_kernel(pmd, gpa);
497 	if (pte_present(*ptep)) {
498 		/* Check if someone else set the same thing */
499 		if (pte_raw(*ptep) == pte_raw(pte)) {
500 			ret = 0;
501 			goto out_unlock;
502 		}
503 		/* Valid page here already, add our extra bits */
504 		WARN_ON_ONCE((pte_val(*ptep) ^ pte_val(pte)) &
505 							PTE_BITS_MUST_MATCH);
506 		kvmppc_radix_update_pte(kvm, ptep, 0, pte_val(pte), gpa, 0);
507 		ret = 0;
508 		goto out_unlock;
509 	}
510 	kvmppc_radix_set_pte_at(kvm, gpa, ptep, pte);
511 	ret = 0;
512 
513  out_unlock:
514 	spin_unlock(&kvm->mmu_lock);
515 	if (new_pud)
516 		pud_free(kvm->mm, new_pud);
517 	if (new_pmd)
518 		kvmppc_pmd_free(new_pmd);
519 	if (new_ptep)
520 		kvmppc_pte_free(new_ptep);
521 	return ret;
522 }
523 
524 int kvmppc_book3s_radix_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu,
525 				   unsigned long ea, unsigned long dsisr)
526 {
527 	struct kvm *kvm = vcpu->kvm;
528 	unsigned long mmu_seq, pte_size;
529 	unsigned long gpa, gfn, hva, pfn;
530 	struct kvm_memory_slot *memslot;
531 	struct page *page = NULL;
532 	long ret;
533 	bool writing;
534 	bool upgrade_write = false;
535 	bool *upgrade_p = &upgrade_write;
536 	pte_t pte, *ptep;
537 	unsigned long pgflags;
538 	unsigned int shift, level;
539 
540 	/* Check for unusual errors */
541 	if (dsisr & DSISR_UNSUPP_MMU) {
542 		pr_err("KVM: Got unsupported MMU fault\n");
543 		return -EFAULT;
544 	}
545 	if (dsisr & DSISR_BADACCESS) {
546 		/* Reflect to the guest as DSI */
547 		pr_err("KVM: Got radix HV page fault with DSISR=%lx\n", dsisr);
548 		kvmppc_core_queue_data_storage(vcpu, ea, dsisr);
549 		return RESUME_GUEST;
550 	}
551 
552 	/* Translate the logical address and get the page */
553 	gpa = vcpu->arch.fault_gpa & ~0xfffUL;
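	/* The top nibble is not part of the guest real address; discard it */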
554 	gpa &= ~0xF000000000000000ul;
555 	gfn = gpa >> PAGE_SHIFT;
556 	if (!(dsisr & DSISR_PRTABLE_FAULT))
557 		gpa |= ea & 0xfff;
558 	memslot = gfn_to_memslot(kvm, gfn);
559 
560 	/* No memslot means it's an emulated MMIO region */
561 	if (!memslot || (memslot->flags & KVM_MEMSLOT_INVALID)) {
562 		if (dsisr & (DSISR_PRTABLE_FAULT | DSISR_BADACCESS |
563 			     DSISR_SET_RC)) {
564 			/*
565 			 * Bad address in guest page table tree, or other
566 			 * unusual error - reflect it to the guest as DSI.
567 			 */
568 			kvmppc_core_queue_data_storage(vcpu, ea, dsisr);
569 			return RESUME_GUEST;
570 		}
571 		return kvmppc_hv_emulate_mmio(run, vcpu, gpa, ea,
572 					      dsisr & DSISR_ISSTORE);
573 	}
574 
575 	writing = (dsisr & DSISR_ISSTORE) != 0;
576 	if (memslot->flags & KVM_MEM_READONLY) {
577 		if (writing) {
578 			/* give the guest a DSI */
579 			dsisr = DSISR_ISSTORE | DSISR_PROTFAULT;
580 			kvmppc_core_queue_data_storage(vcpu, ea, dsisr);
581 			return RESUME_GUEST;
582 		}
583 		upgrade_p = NULL;
584 	}
585 
586 	if (dsisr & DSISR_SET_RC) {
587 		/*
588 		 * Need to set an R or C bit in the 2nd-level tables;
589 		 * since we are just helping out the hardware here,
590 		 * it is sufficient to do what the hardware does.
591 		 */
592 		pgflags = _PAGE_ACCESSED;
593 		if (writing)
594 			pgflags |= _PAGE_DIRTY;
595 		/*
		 * We are walking the secondary (partition-scoped) page table
		 * here; it is protected by kvm->mmu_lock, so we can do this
		 * without disabling irqs.
598 		 */
599 		spin_lock(&kvm->mmu_lock);
600 		ptep = __find_linux_pte(kvm->arch.pgtable,
601 					gpa, NULL, &shift);
602 		if (ptep && pte_present(*ptep) &&
603 		    (!writing || pte_write(*ptep))) {
604 			kvmppc_radix_update_pte(kvm, ptep, 0, pgflags,
605 						gpa, shift);
606 			dsisr &= ~DSISR_SET_RC;
607 		}
608 		spin_unlock(&kvm->mmu_lock);
609 		if (!(dsisr & (DSISR_BAD_FAULT_64S | DSISR_NOHPTE |
610 			       DSISR_PROTFAULT | DSISR_SET_RC)))
611 			return RESUME_GUEST;
612 	}
613 
614 	/* used to check for invalidations in progress */
615 	mmu_seq = kvm->mmu_notifier_seq;
616 	smp_rmb();
617 
618 	/*
619 	 * Do a fast check first, since __gfn_to_pfn_memslot doesn't
620 	 * do it with !atomic && !async, which is how we call it.
621 	 * We always ask for write permission since the common case
622 	 * is that the page is writable.
623 	 */
624 	hva = gfn_to_hva_memslot(memslot, gfn);
625 	if (upgrade_p && __get_user_pages_fast(hva, 1, 1, &page) == 1) {
626 		pfn = page_to_pfn(page);
627 		upgrade_write = true;
628 	} else {
629 		/* Call KVM generic code to do the slow-path check */
630 		pfn = __gfn_to_pfn_memslot(memslot, gfn, false, NULL,
631 					   writing, upgrade_p);
632 		if (is_error_noslot_pfn(pfn))
633 			return -EFAULT;
634 		page = NULL;
635 		if (pfn_valid(pfn)) {
636 			page = pfn_to_page(pfn);
637 			if (PageReserved(page))
638 				page = NULL;
639 		}
640 	}
641 
642 	/* See if we can insert a 1GB or 2MB large PTE here */
643 	level = 0;
644 	if (page && PageCompound(page)) {
645 		pte_size = PAGE_SIZE << compound_order(compound_head(page));
646 		if (pte_size >= PUD_SIZE &&
647 		    (gpa & (PUD_SIZE - PAGE_SIZE)) ==
648 		    (hva & (PUD_SIZE - PAGE_SIZE))) {
649 			level = 2;
650 			pfn &= ~((PUD_SIZE >> PAGE_SHIFT) - 1);
651 		} else if (pte_size >= PMD_SIZE &&
652 			   (gpa & (PMD_SIZE - PAGE_SIZE)) ==
653 			   (hva & (PMD_SIZE - PAGE_SIZE))) {
654 			level = 1;
655 			pfn &= ~((PMD_SIZE >> PAGE_SHIFT) - 1);
656 		}
657 	}
658 
659 	/*
660 	 * Compute the PTE value that we need to insert.
661 	 */
662 	if (page) {
663 		pgflags = _PAGE_READ | _PAGE_EXEC | _PAGE_PRESENT | _PAGE_PTE |
664 			_PAGE_ACCESSED;
665 		if (writing || upgrade_write)
666 			pgflags |= _PAGE_WRITE | _PAGE_DIRTY;
667 		pte = pfn_pte(pfn, __pgprot(pgflags));
668 	} else {
669 		/*
670 		 * Read the PTE from the process' radix tree and use that
671 		 * so we get the attribute bits.
672 		 */
673 		local_irq_disable();
674 		ptep = __find_linux_pte(vcpu->arch.pgdir, hva, NULL, &shift);
675 		pte = *ptep;
676 		local_irq_enable();
677 		if (shift == PUD_SHIFT &&
678 		    (gpa & (PUD_SIZE - PAGE_SIZE)) ==
679 		    (hva & (PUD_SIZE - PAGE_SIZE))) {
680 			level = 2;
681 		} else if (shift == PMD_SHIFT &&
682 			   (gpa & (PMD_SIZE - PAGE_SIZE)) ==
683 			   (hva & (PMD_SIZE - PAGE_SIZE))) {
684 			level = 1;
685 		} else if (shift && shift != PAGE_SHIFT) {
686 			/* Adjust PFN */
687 			unsigned long mask = (1ul << shift) - PAGE_SIZE;
688 			pte = __pte(pte_val(pte) | (hva & mask));
689 		}
690 		pte = __pte(pte_val(pte) | _PAGE_EXEC | _PAGE_ACCESSED);
691 		if (writing || upgrade_write) {
692 			if (pte_val(pte) & _PAGE_WRITE)
693 				pte = __pte(pte_val(pte) | _PAGE_DIRTY);
694 		} else {
695 			pte = __pte(pte_val(pte) & ~(_PAGE_WRITE | _PAGE_DIRTY));
696 		}
697 	}
698 
699 	/* Allocate space in the tree and write the PTE */
700 	ret = kvmppc_create_pte(kvm, pte, gpa, level, mmu_seq);
701 
702 	if (page) {
703 		if (!ret && (pte_val(pte) & _PAGE_WRITE))
704 			set_page_dirty_lock(page);
705 		put_page(page);
706 	}
707 
708 	if (ret == 0 || ret == -EAGAIN)
709 		ret = RESUME_GUEST;
710 	return ret;
711 }
712 
713 /* Called with kvm->lock held */
714 int kvm_unmap_radix(struct kvm *kvm, struct kvm_memory_slot *memslot,
715 		    unsigned long gfn)
716 {
717 	pte_t *ptep;
718 	unsigned long gpa = gfn << PAGE_SHIFT;
719 	unsigned int shift;
720 	unsigned long old;
721 
722 	ptep = __find_linux_pte(kvm->arch.pgtable, gpa, NULL, &shift);
723 	if (ptep && pte_present(*ptep)) {
724 		old = kvmppc_radix_update_pte(kvm, ptep, ~0UL, 0,
725 					      gpa, shift);
726 		kvmppc_radix_tlbie_page(kvm, gpa, shift);
727 		if ((old & _PAGE_DIRTY) && memslot->dirty_bitmap) {
728 			unsigned long npages = 1;
729 			if (shift)
730 				npages = 1ul << (shift - PAGE_SHIFT);
731 			kvmppc_update_dirty_map(memslot, gfn, npages);
732 		}
733 	}
734 	return 0;
735 }
736 
737 /* Called with kvm->lock held */
738 int kvm_age_radix(struct kvm *kvm, struct kvm_memory_slot *memslot,
739 		  unsigned long gfn)
740 {
741 	pte_t *ptep;
742 	unsigned long gpa = gfn << PAGE_SHIFT;
743 	unsigned int shift;
744 	int ref = 0;
745 
746 	ptep = __find_linux_pte(kvm->arch.pgtable, gpa, NULL, &shift);
747 	if (ptep && pte_present(*ptep) && pte_young(*ptep)) {
748 		kvmppc_radix_update_pte(kvm, ptep, _PAGE_ACCESSED, 0,
749 					gpa, shift);
750 		/* XXX need to flush tlb here? */
751 		ref = 1;
752 	}
753 	return ref;
754 }
755 
756 /* Called with kvm->lock held */
757 int kvm_test_age_radix(struct kvm *kvm, struct kvm_memory_slot *memslot,
758 		       unsigned long gfn)
759 {
760 	pte_t *ptep;
761 	unsigned long gpa = gfn << PAGE_SHIFT;
762 	unsigned int shift;
763 	int ref = 0;
764 
765 	ptep = __find_linux_pte(kvm->arch.pgtable, gpa, NULL, &shift);
766 	if (ptep && pte_present(*ptep) && pte_young(*ptep))
767 		ref = 1;
768 	return ref;
769 }
770 
771 /* Returns the number of PAGE_SIZE pages that are dirty */
772 static int kvm_radix_test_clear_dirty(struct kvm *kvm,
773 				struct kvm_memory_slot *memslot, int pagenum)
774 {
775 	unsigned long gfn = memslot->base_gfn + pagenum;
776 	unsigned long gpa = gfn << PAGE_SHIFT;
777 	pte_t *ptep;
778 	unsigned int shift;
779 	int ret = 0;
780 
781 	ptep = __find_linux_pte(kvm->arch.pgtable, gpa, NULL, &shift);
782 	if (ptep && pte_present(*ptep) && pte_dirty(*ptep)) {
783 		ret = 1;
784 		if (shift)
785 			ret = 1 << (shift - PAGE_SHIFT);
786 		kvmppc_radix_update_pte(kvm, ptep, _PAGE_DIRTY, 0,
787 					gpa, shift);
788 		kvmppc_radix_tlbie_page(kvm, gpa, shift);
789 	}
790 	return ret;
791 }
792 
793 long kvmppc_hv_get_dirty_log_radix(struct kvm *kvm,
794 			struct kvm_memory_slot *memslot, unsigned long *map)
795 {
796 	unsigned long i, j;
797 	int npages;
798 
799 	for (i = 0; i < memslot->npages; i = j) {
800 		npages = kvm_radix_test_clear_dirty(kvm, memslot, i);
801 
802 		/*
803 		 * Note that if npages > 0 then i must be a multiple of npages,
804 		 * since huge pages are only used to back the guest at guest
805 		 * real addresses that are a multiple of their size.
806 		 * Since we have at most one PTE covering any given guest
807 		 * real address, if npages > 1 we can skip to i + npages.
808 		 */
809 		j = i + 1;
810 		if (npages) {
811 			set_dirty_bits(map, i, npages);
812 			j = i + npages;
813 		}
814 	}
815 	return 0;
816 }
817 
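/* Add a supported page size (shift and AP value) for KVM_PPC_GET_RMMU_INFO */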
818 static void add_rmmu_ap_encoding(struct kvm_ppc_rmmu_info *info,
819 				 int psize, int *indexp)
820 {
821 	if (!mmu_psize_defs[psize].shift)
822 		return;
823 	info->ap_encodings[*indexp] = mmu_psize_defs[psize].shift |
824 		(mmu_psize_defs[psize].ap << 29);
825 	++(*indexp);
826 }
827 
828 int kvmhv_get_rmmu_info(struct kvm *kvm, struct kvm_ppc_rmmu_info *info)
829 {
830 	int i;
831 
832 	if (!radix_enabled())
833 		return -EINVAL;
834 	memset(info, 0, sizeof(*info));
835 
836 	/* 4k page size */
837 	info->geometries[0].page_shift = 12;
838 	info->geometries[0].level_bits[0] = 9;
839 	for (i = 1; i < 4; ++i)
840 		info->geometries[0].level_bits[i] = p9_supported_radix_bits[i];
841 	/* 64k page size */
842 	info->geometries[1].page_shift = 16;
843 	for (i = 0; i < 4; ++i)
844 		info->geometries[1].level_bits[i] = p9_supported_radix_bits[i];
845 
846 	i = 0;
847 	add_rmmu_ap_encoding(info, MMU_PAGE_4K, &i);
848 	add_rmmu_ap_encoding(info, MMU_PAGE_64K, &i);
849 	add_rmmu_ap_encoding(info, MMU_PAGE_2M, &i);
850 	add_rmmu_ap_encoding(info, MMU_PAGE_1G, &i);
851 
852 	return 0;
853 }
854 
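/* Allocate the root (pgd) of the guest's partition-scoped page tree */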
855 int kvmppc_init_vm_radix(struct kvm *kvm)
856 {
857 	kvm->arch.pgtable = pgd_alloc(kvm->mm);
858 	if (!kvm->arch.pgtable)
859 		return -ENOMEM;
860 	return 0;
861 }
862 
863 static void pte_ctor(void *addr)
864 {
865 	memset(addr, 0, RADIX_PTE_TABLE_SIZE);
866 }
867 
868 static void pmd_ctor(void *addr)
869 {
870 	memset(addr, 0, RADIX_PMD_TABLE_SIZE);
871 }
872 
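/* Create the kmem caches used for the guest radix pte and pmd page tables */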
873 int kvmppc_radix_init(void)
874 {
875 	unsigned long size = sizeof(void *) << RADIX_PTE_INDEX_SIZE;
876 
877 	kvm_pte_cache = kmem_cache_create("kvm-pte", size, size, 0, pte_ctor);
878 	if (!kvm_pte_cache)
879 		return -ENOMEM;
880 
881 	size = sizeof(void *) << RADIX_PMD_INDEX_SIZE;
882 
883 	kvm_pmd_cache = kmem_cache_create("kvm-pmd", size, size, 0, pmd_ctor);
884 	if (!kvm_pmd_cache) {
885 		kmem_cache_destroy(kvm_pte_cache);
886 		return -ENOMEM;
887 	}
888 
889 	return 0;
890 }
891 
892 void kvmppc_radix_exit(void)
893 {
894 	kmem_cache_destroy(kvm_pte_cache);
895 	kmem_cache_destroy(kvm_pmd_cache);
896 }
897