xref: /openbmc/linux/arch/powerpc/kvm/book3s_64_mmu_radix.c (revision ba61bb17496d1664bf7c5c2fd650d5fd78bd0a92)
1 /*
2  * This program is free software; you can redistribute it and/or modify
3  * it under the terms of the GNU General Public License, version 2, as
4  * published by the Free Software Foundation.
5  *
6  * Copyright 2016 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
7  */
8 
9 #include <linux/types.h>
10 #include <linux/string.h>
11 #include <linux/kvm.h>
12 #include <linux/kvm_host.h>
13 
14 #include <asm/kvm_ppc.h>
15 #include <asm/kvm_book3s.h>
16 #include <asm/page.h>
17 #include <asm/mmu.h>
18 #include <asm/pgtable.h>
19 #include <asm/pgalloc.h>
20 #include <asm/pte-walk.h>
21 
22 /*
23  * Supported radix tree geometry.
24  * Like P9, we support either 5 or 9 bits at the first (lowest) level,
25  * giving a page size of 64k or 4k respectively.
26  */
27 static int p9_supported_radix_bits[4] = { 5, 9, 9, 13 };
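/*
 * For example: 13 + 9 + 9 + 9 index bits leave a 12-bit (4k) page offset
 * in a 52-bit space, while 13 + 9 + 9 + 5 bits leave a 16-bit (64k) offset.
 */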
28 
29 int kvmppc_mmu_radix_xlate(struct kvm_vcpu *vcpu, gva_t eaddr,
30 			   struct kvmppc_pte *gpte, bool data, bool iswrite)
31 {
32 	struct kvm *kvm = vcpu->kvm;
33 	u32 pid;
34 	int ret, level, ps;
35 	__be64 prte, rpte;
36 	unsigned long ptbl;
37 	unsigned long root, pte, index;
38 	unsigned long rts, bits, offset;
39 	unsigned long gpa;
40 	unsigned long proc_tbl_size;
41 
42 	/* Work out effective PID */
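	/*
	 * The top two bits of the effective address select the quadrant:
	 * quadrant 0 is translated with the current process PID, quadrant 3
	 * with PID 0; quadrants 1 and 2 are not valid here and are rejected.
	 */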
43 	switch (eaddr >> 62) {
44 	case 0:
45 		pid = vcpu->arch.pid;
46 		break;
47 	case 3:
48 		pid = 0;
49 		break;
50 	default:
51 		return -EINVAL;
52 	}
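	/* The process table is 2^(PRTS + 12) bytes; each entry is 16 bytes */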
53 	proc_tbl_size = 1 << ((kvm->arch.process_table & PRTS_MASK) + 12);
54 	if (pid * 16 >= proc_tbl_size)
55 		return -EINVAL;
56 
57 	/* Read guest's process table to find root of tree for effective PID */
58 	ptbl = (kvm->arch.process_table & PRTB_MASK) + (pid * 16);
59 	ret = kvm_read_guest(kvm, ptbl, &prte, sizeof(prte));
60 	if (ret)
61 		return ret;
62 
63 	root = be64_to_cpu(prte);
64 	rts = ((root & RTS1_MASK) >> (RTS1_SHIFT - 3)) |
65 		((root & RTS2_MASK) >> RTS2_SHIFT);
66 	bits = root & RPDS_MASK;
67 	root = root & RPDB_MASK;
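	/* The radix tree maps 2^(RTS + 31) bytes of effective address space */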
68 
69 	/* P9 DD1 interprets RTS (radix tree size) differently */
70 	offset = rts + 31;
71 	if (cpu_has_feature(CPU_FTR_POWER9_DD1))
72 		offset -= 3;
73 
74 	/* Current implementations only support a 52-bit address space */
75 	if (offset != 52)
76 		return -EINVAL;
77 
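	/* Walk the 4-level tree from the root; a leaf PTE ends the walk early */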
78 	for (level = 3; level >= 0; --level) {
79 		if (level && bits != p9_supported_radix_bits[level])
80 			return -EINVAL;
81 		if (level == 0 && !(bits == 5 || bits == 9))
82 			return -EINVAL;
83 		offset -= bits;
84 		index = (eaddr >> offset) & ((1UL << bits) - 1);
85 		/* check that low bits of page table base are zero */
86 		if (root & ((1UL << (bits + 3)) - 1))
87 			return -EINVAL;
88 		ret = kvm_read_guest(kvm, root + index * 8,
89 				     &rpte, sizeof(rpte));
90 		if (ret)
91 			return ret;
92 		pte = __be64_to_cpu(rpte);
93 		if (!(pte & _PAGE_PRESENT))
94 			return -ENOENT;
95 		if (pte & _PAGE_PTE)
96 			break;
97 		bits = pte & 0x1f;
98 		root = pte & 0x0fffffffffffff00ul;
99 	}
100 	/* need a leaf at lowest level; 512GB pages not supported */
101 	if (level < 0 || level == 3)
102 		return -EINVAL;
103 
104 	/* offset is now log base 2 of the page size */
105 	gpa = pte & 0x01fffffffffff000ul;
106 	if (gpa & ((1ul << offset) - 1))
107 		return -EINVAL;
108 	gpa += eaddr & ((1ul << offset) - 1);
109 	for (ps = MMU_PAGE_4K; ps < MMU_PAGE_COUNT; ++ps)
110 		if (offset == mmu_psize_defs[ps].shift)
111 			break;
112 	gpte->page_size = ps;
113 
114 	gpte->eaddr = eaddr;
115 	gpte->raddr = gpa;
116 
117 	/* Work out permissions */
118 	gpte->may_read = !!(pte & _PAGE_READ);
119 	gpte->may_write = !!(pte & _PAGE_WRITE);
120 	gpte->may_execute = !!(pte & _PAGE_EXEC);
121 	if (kvmppc_get_msr(vcpu) & MSR_PR) {
122 		if (pte & _PAGE_PRIVILEGED) {
123 			gpte->may_read = 0;
124 			gpte->may_write = 0;
125 			gpte->may_execute = 0;
126 		}
127 	} else {
128 		if (!(pte & _PAGE_PRIVILEGED)) {
129 			/* Check AMR/IAMR to see if strict mode is in force */
130 			if (vcpu->arch.amr & (1ul << 62))
131 				gpte->may_read = 0;
132 			if (vcpu->arch.amr & (1ul << 63))
133 				gpte->may_write = 0;
134 			if (vcpu->arch.iamr & (1ul << 62))
135 				gpte->may_execute = 0;
136 		}
137 	}
138 
139 	return 0;
140 }
141 
142 static void kvmppc_radix_tlbie_page(struct kvm *kvm, unsigned long addr,
143 				    unsigned int pshift)
144 {
145 	unsigned long psize = PAGE_SIZE;
146 
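	/* A pshift of 0 denotes a base PAGE_SIZE mapping */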
147 	if (pshift)
148 		psize = 1UL << pshift;
149 
150 	addr &= ~(psize - 1);
151 	radix__flush_tlb_lpid_page(kvm->arch.lpid, addr, psize);
152 }
153 
154 static void kvmppc_radix_flush_pwc(struct kvm *kvm)
155 {
156 	radix__flush_pwc_lpid(kvm->arch.lpid);
157 }
158 
159 static unsigned long kvmppc_radix_update_pte(struct kvm *kvm, pte_t *ptep,
160 				      unsigned long clr, unsigned long set,
161 				      unsigned long addr, unsigned int shift)
162 {
163 	unsigned long old = 0;
164 
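	/*
	 * POWER9 DD1 workaround: a valid PTE must be invalidated and its
	 * translation flushed before the entry is modified; carry the
	 * original _PAGE_PRESENT bit over into the returned old value.
	 */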
165 	if (!(clr & _PAGE_PRESENT) && cpu_has_feature(CPU_FTR_POWER9_DD1) &&
166 	    pte_present(*ptep)) {
167 		/* have to invalidate it first */
168 		old = __radix_pte_update(ptep, _PAGE_PRESENT, 0);
169 		kvmppc_radix_tlbie_page(kvm, addr, shift);
170 		set |= _PAGE_PRESENT;
171 		old &= _PAGE_PRESENT;
172 	}
173 	return __radix_pte_update(ptep, clr, set) | old;
174 }
175 
176 void kvmppc_radix_set_pte_at(struct kvm *kvm, unsigned long addr,
177 			     pte_t *ptep, pte_t pte)
178 {
179 	radix__set_pte_at(kvm->mm, addr, ptep, pte, 0);
180 }
181 
182 static struct kmem_cache *kvm_pte_cache;
183 static struct kmem_cache *kvm_pmd_cache;
184 
185 static pte_t *kvmppc_pte_alloc(void)
186 {
187 	return kmem_cache_alloc(kvm_pte_cache, GFP_KERNEL);
188 }
189 
190 static void kvmppc_pte_free(pte_t *ptep)
191 {
192 	kmem_cache_free(kvm_pte_cache, ptep);
193 }
194 
195 /* Like pmd_huge() and pmd_large(), but works regardless of config options */
196 static inline int pmd_is_leaf(pmd_t pmd)
197 {
198 	return !!(pmd_val(pmd) & _PAGE_PTE);
199 }
200 
201 static pmd_t *kvmppc_pmd_alloc(void)
202 {
203 	return kmem_cache_alloc(kvm_pmd_cache, GFP_KERNEL);
204 }
205 
206 static void kvmppc_pmd_free(pmd_t *pmdp)
207 {
208 	kmem_cache_free(kvm_pmd_cache, pmdp);
209 }
210 
211 static void kvmppc_unmap_pte(struct kvm *kvm, pte_t *pte,
212 			     unsigned long gpa, unsigned int shift)
213 
214 {
215 	unsigned long page_size = 1ul << shift;
216 	unsigned long old;
217 
218 	old = kvmppc_radix_update_pte(kvm, pte, ~0UL, 0, gpa, shift);
219 	kvmppc_radix_tlbie_page(kvm, gpa, shift);
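	/* If the mapping was dirty, transfer that to the memslot's dirty bitmap */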
220 	if (old & _PAGE_DIRTY) {
221 		unsigned long gfn = gpa >> PAGE_SHIFT;
222 		struct kvm_memory_slot *memslot;
223 
224 		memslot = gfn_to_memslot(kvm, gfn);
225 		if (memslot && memslot->dirty_bitmap)
226 			kvmppc_update_dirty_map(memslot, gfn, page_size);
227 	}
228 }
229 
230 /*
231  * The kvmppc_unmap_free_p?d functions free existing page tables, recursively
232  * descending to clear and free their children.
233  * Callers are responsible for flushing the PWC.
234  *
235  * When page tables are being unmapped/freed as part of the page fault path
236  * (full == false), valid ptes are not expected to be present. There is code
237  * to unmap them and emit a warning if any are encountered, but data may
238  * already have been corrupted by the unexpected mappings.
239  */
240 static void kvmppc_unmap_free_pte(struct kvm *kvm, pte_t *pte, bool full)
241 {
242 	if (full) {
243 		memset(pte, 0, sizeof(long) << PTE_INDEX_SIZE);
244 	} else {
245 		pte_t *p = pte;
246 		unsigned long it;
247 
248 		for (it = 0; it < PTRS_PER_PTE; ++it, ++p) {
249 			if (pte_val(*p) == 0)
250 				continue;
251 			WARN_ON_ONCE(1);
252 			kvmppc_unmap_pte(kvm, p,
253 					 pte_pfn(*p) << PAGE_SHIFT,
254 					 PAGE_SHIFT);
255 		}
256 	}
257 
258 	kvmppc_pte_free(pte);
259 }
260 
261 static void kvmppc_unmap_free_pmd(struct kvm *kvm, pmd_t *pmd, bool full)
262 {
263 	unsigned long im;
264 	pmd_t *p = pmd;
265 
266 	for (im = 0; im < PTRS_PER_PMD; ++im, ++p) {
267 		if (!pmd_present(*p))
268 			continue;
269 		if (pmd_is_leaf(*p)) {
270 			if (full) {
271 				pmd_clear(p);
272 			} else {
273 				WARN_ON_ONCE(1);
274 				kvmppc_unmap_pte(kvm, (pte_t *)p,
275 					 pte_pfn(*(pte_t *)p) << PAGE_SHIFT,
276 					 PMD_SHIFT);
277 			}
278 		} else {
279 			pte_t *pte;
280 
281 			pte = pte_offset_map(p, 0);
282 			kvmppc_unmap_free_pte(kvm, pte, full);
283 			pmd_clear(p);
284 		}
285 	}
286 	kvmppc_pmd_free(pmd);
287 }
288 
289 static void kvmppc_unmap_free_pud(struct kvm *kvm, pud_t *pud)
290 {
291 	unsigned long iu;
292 	pud_t *p = pud;
293 
294 	for (iu = 0; iu < PTRS_PER_PUD; ++iu, ++p) {
295 		if (!pud_present(*p))
296 			continue;
297 		if (pud_huge(*p)) {
298 			pud_clear(p);
299 		} else {
300 			pmd_t *pmd;
301 
302 			pmd = pmd_offset(p, 0);
303 			kvmppc_unmap_free_pmd(kvm, pmd, true);
304 			pud_clear(p);
305 		}
306 	}
307 	pud_free(kvm->mm, pud);
308 }
309 
310 void kvmppc_free_radix(struct kvm *kvm)
311 {
312 	unsigned long ig;
313 	pgd_t *pgd;
314 
315 	if (!kvm->arch.pgtable)
316 		return;
317 	pgd = kvm->arch.pgtable;
318 	for (ig = 0; ig < PTRS_PER_PGD; ++ig, ++pgd) {
319 		pud_t *pud;
320 
321 		if (!pgd_present(*pgd))
322 			continue;
323 		pud = pud_offset(pgd, 0);
324 		kvmppc_unmap_free_pud(kvm, pud);
325 		pgd_clear(pgd);
326 	}
327 	pgd_free(kvm->mm, kvm->arch.pgtable);
328 	kvm->arch.pgtable = NULL;
329 }
330 
331 static void kvmppc_unmap_free_pmd_entry_table(struct kvm *kvm, pmd_t *pmd,
332 					      unsigned long gpa)
333 {
334 	pte_t *pte = pte_offset_kernel(pmd, 0);
335 
336 	/*
337 	 * Clearing the pmd entry and then flushing the PWC ensures that the pte
338 	 * page can no longer be cached by the MMU, so it can be freed without
339 	 * flushing the PWC again.
340 	 */
341 	pmd_clear(pmd);
342 	kvmppc_radix_flush_pwc(kvm);
343 
344 	kvmppc_unmap_free_pte(kvm, pte, false);
345 }
346 
347 static void kvmppc_unmap_free_pud_entry_table(struct kvm *kvm, pud_t *pud,
348 					unsigned long gpa)
349 {
350 	pmd_t *pmd = pmd_offset(pud, 0);
351 
352 	/*
353 	 * Clearing the pud entry and then flushing the PWC ensures that the pmd
354 	 * page and any child pte pages can no longer be cached by the MMU,
355 	 * so they can be freed without flushing the PWC again.
356 	 */
357 	pud_clear(pud);
358 	kvmppc_radix_flush_pwc(kvm);
359 
360 	kvmppc_unmap_free_pmd(kvm, pmd, false);
361 }
362 
363 /*
364  * A number of bits may differ between different faults on the same
365  * partition-scope entry: the R and C bits change in the course of cleaning
366  * and aging, and the write bit can change because either the access was
367  * upgraded or a read fault raced with a write fault that set those bits
368  * first.
369  */
370 #define PTE_BITS_MUST_MATCH (~(_PAGE_WRITE | _PAGE_DIRTY | _PAGE_ACCESSED))
371 
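/*
 * Install a partition-scoped PTE. "level" selects where the leaf goes:
 * 0 => base page at the PTE level, 1 => 2MB at the PMD level,
 * 2 => 1GB at the PUD level.
 */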
372 static int kvmppc_create_pte(struct kvm *kvm, pte_t pte, unsigned long gpa,
373 			     unsigned int level, unsigned long mmu_seq)
374 {
375 	pgd_t *pgd;
376 	pud_t *pud, *new_pud = NULL;
377 	pmd_t *pmd, *new_pmd = NULL;
378 	pte_t *ptep, *new_ptep = NULL;
379 	int ret;
380 
381 	/* Traverse the guest's 2nd-level tree, allocate new levels needed */
382 	pgd = kvm->arch.pgtable + pgd_index(gpa);
383 	pud = NULL;
384 	if (pgd_present(*pgd))
385 		pud = pud_offset(pgd, gpa);
386 	else
387 		new_pud = pud_alloc_one(kvm->mm, gpa);
388 
389 	pmd = NULL;
390 	if (pud && pud_present(*pud) && !pud_huge(*pud))
391 		pmd = pmd_offset(pud, gpa);
392 	else if (level <= 1)
393 		new_pmd = kvmppc_pmd_alloc();
394 
395 	if (level == 0 && !(pmd && pmd_present(*pmd) && !pmd_is_leaf(*pmd)))
396 		new_ptep = kvmppc_pte_alloc();
397 
398 	/* Check if we might have been invalidated; let the guest retry if so */
399 	spin_lock(&kvm->mmu_lock);
400 	ret = -EAGAIN;
401 	if (mmu_notifier_retry(kvm, mmu_seq))
402 		goto out_unlock;
403 
404 	/* Now traverse again under the lock and change the tree */
405 	ret = -ENOMEM;
406 	if (pgd_none(*pgd)) {
407 		if (!new_pud)
408 			goto out_unlock;
409 		pgd_populate(kvm->mm, pgd, new_pud);
410 		new_pud = NULL;
411 	}
412 	pud = pud_offset(pgd, gpa);
413 	if (pud_huge(*pud)) {
414 		unsigned long hgpa = gpa & PUD_MASK;
415 
416 		/* Check if we raced and someone else has set the same thing */
417 		if (level == 2) {
418 			if (pud_raw(*pud) == pte_raw(pte)) {
419 				ret = 0;
420 				goto out_unlock;
421 			}
422 			/* Valid 1GB page here already, add our extra bits */
423 			WARN_ON_ONCE((pud_val(*pud) ^ pte_val(pte)) &
424 							PTE_BITS_MUST_MATCH);
425 			kvmppc_radix_update_pte(kvm, (pte_t *)pud,
426 					      0, pte_val(pte), hgpa, PUD_SHIFT);
427 			ret = 0;
428 			goto out_unlock;
429 		}
430 		/*
431 		 * If we raced with another CPU which has just put
432 		 * a 1GB pte in after we saw a pmd page, try again.
433 		 */
434 		if (!new_pmd) {
435 			ret = -EAGAIN;
436 			goto out_unlock;
437 		}
438 		/* Valid 1GB page here already, remove it */
439 		kvmppc_unmap_pte(kvm, (pte_t *)pud, hgpa, PUD_SHIFT);
440 	}
441 	if (level == 2) {
442 		if (!pud_none(*pud)) {
443 			/*
444 			 * There's a page table page here, but we wanted to
445 			 * install a large page, so remove and free the page
446 			 * table page.
447 			 */
448 			kvmppc_unmap_free_pud_entry_table(kvm, pud, gpa);
449 		}
450 		kvmppc_radix_set_pte_at(kvm, gpa, (pte_t *)pud, pte);
451 		ret = 0;
452 		goto out_unlock;
453 	}
454 	if (pud_none(*pud)) {
455 		if (!new_pmd)
456 			goto out_unlock;
457 		pud_populate(kvm->mm, pud, new_pmd);
458 		new_pmd = NULL;
459 	}
460 	pmd = pmd_offset(pud, gpa);
461 	if (pmd_is_leaf(*pmd)) {
462 		unsigned long lgpa = gpa & PMD_MASK;
463 
464 		/* Check if we raced and someone else has set the same thing */
465 		if (level == 1) {
466 			if (pmd_raw(*pmd) == pte_raw(pte)) {
467 				ret = 0;
468 				goto out_unlock;
469 			}
470 			/* Valid 2MB page here already, add our extra bits */
471 			WARN_ON_ONCE((pmd_val(*pmd) ^ pte_val(pte)) &
472 							PTE_BITS_MUST_MATCH);
473 			kvmppc_radix_update_pte(kvm, pmdp_ptep(pmd),
474 					      0, pte_val(pte), lgpa, PMD_SHIFT);
475 			ret = 0;
476 			goto out_unlock;
477 		}
478 
479 		/*
480 		 * If we raced with another CPU which has just put
481 		 * a 2MB pte in after we saw a pte page, try again.
482 		 */
483 		if (!new_ptep) {
484 			ret = -EAGAIN;
485 			goto out_unlock;
486 		}
487 		/* Valid 2MB page here already, remove it */
488 		kvmppc_unmap_pte(kvm, pmdp_ptep(pmd), lgpa, PMD_SHIFT);
489 	}
490 	if (level == 1) {
491 		if (!pmd_none(*pmd)) {
492 			/*
493 			 * There's a page table page here, but we wanted to
494 			 * install a large page, so remove and free the page
495 			 * table page.
496 			 */
497 			kvmppc_unmap_free_pmd_entry_table(kvm, pmd, gpa);
498 		}
499 		kvmppc_radix_set_pte_at(kvm, gpa, pmdp_ptep(pmd), pte);
500 		ret = 0;
501 		goto out_unlock;
502 	}
503 	if (pmd_none(*pmd)) {
504 		if (!new_ptep)
505 			goto out_unlock;
506 		pmd_populate(kvm->mm, pmd, new_ptep);
507 		new_ptep = NULL;
508 	}
509 	ptep = pte_offset_kernel(pmd, gpa);
510 	if (pte_present(*ptep)) {
511 		/* Check if someone else set the same thing */
512 		if (pte_raw(*ptep) == pte_raw(pte)) {
513 			ret = 0;
514 			goto out_unlock;
515 		}
516 		/* Valid page here already, add our extra bits */
517 		WARN_ON_ONCE((pte_val(*ptep) ^ pte_val(pte)) &
518 							PTE_BITS_MUST_MATCH);
519 		kvmppc_radix_update_pte(kvm, ptep, 0, pte_val(pte), gpa, 0);
520 		ret = 0;
521 		goto out_unlock;
522 	}
523 	kvmppc_radix_set_pte_at(kvm, gpa, ptep, pte);
524 	ret = 0;
525 
526  out_unlock:
527 	spin_unlock(&kvm->mmu_lock);
528 	if (new_pud)
529 		pud_free(kvm->mm, new_pud);
530 	if (new_pmd)
531 		kvmppc_pmd_free(new_pmd);
532 	if (new_ptep)
533 		kvmppc_pte_free(new_ptep);
534 	return ret;
535 }
536 
537 int kvmppc_book3s_radix_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu,
538 				   unsigned long ea, unsigned long dsisr)
539 {
540 	struct kvm *kvm = vcpu->kvm;
541 	unsigned long mmu_seq, pte_size;
542 	unsigned long gpa, gfn, hva, pfn;
543 	struct kvm_memory_slot *memslot;
544 	struct page *page = NULL;
545 	long ret;
546 	bool writing;
547 	bool upgrade_write = false;
548 	bool *upgrade_p = &upgrade_write;
549 	pte_t pte, *ptep;
550 	unsigned long pgflags;
551 	unsigned int shift, level;
552 
553 	/* Check for unusual errors */
554 	if (dsisr & DSISR_UNSUPP_MMU) {
555 		pr_err("KVM: Got unsupported MMU fault\n");
556 		return -EFAULT;
557 	}
558 	if (dsisr & DSISR_BADACCESS) {
559 		/* Reflect to the guest as DSI */
560 		pr_err("KVM: Got radix HV page fault with DSISR=%lx\n", dsisr);
561 		kvmppc_core_queue_data_storage(vcpu, ea, dsisr);
562 		return RESUME_GUEST;
563 	}
564 
565 	/* Translate the logical address and get the page */
566 	gpa = vcpu->arch.fault_gpa & ~0xfffUL;
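	/* The top 4 bits of fault_gpa are not part of the guest real address */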
567 	gpa &= ~0xF000000000000000ul;
568 	gfn = gpa >> PAGE_SHIFT;
569 	if (!(dsisr & DSISR_PRTABLE_FAULT))
570 		gpa |= ea & 0xfff;
571 	memslot = gfn_to_memslot(kvm, gfn);
572 
573 	/* No memslot means it's an emulated MMIO region */
574 	if (!memslot || (memslot->flags & KVM_MEMSLOT_INVALID)) {
575 		if (dsisr & (DSISR_PRTABLE_FAULT | DSISR_BADACCESS |
576 			     DSISR_SET_RC)) {
577 			/*
578 			 * Bad address in guest page table tree, or other
579 			 * unusual error - reflect it to the guest as DSI.
580 			 */
581 			kvmppc_core_queue_data_storage(vcpu, ea, dsisr);
582 			return RESUME_GUEST;
583 		}
584 		return kvmppc_hv_emulate_mmio(run, vcpu, gpa, ea,
585 					      dsisr & DSISR_ISSTORE);
586 	}
587 
588 	writing = (dsisr & DSISR_ISSTORE) != 0;
589 	if (memslot->flags & KVM_MEM_READONLY) {
590 		if (writing) {
591 			/* give the guest a DSI */
592 			dsisr = DSISR_ISSTORE | DSISR_PROTFAULT;
593 			kvmppc_core_queue_data_storage(vcpu, ea, dsisr);
594 			return RESUME_GUEST;
595 		}
596 		upgrade_p = NULL;
597 	}
598 
599 	if (dsisr & DSISR_SET_RC) {
600 		/*
601 		 * Need to set an R or C bit in the 2nd-level tables;
602 		 * since we are just helping out the hardware here,
603 		 * it is sufficient to do what the hardware does.
604 		 */
605 		pgflags = _PAGE_ACCESSED;
606 		if (writing)
607 			pgflags |= _PAGE_DIRTY;
608 		/*
609 		 * We are walking the secondary page table here. We can do this
610 		 * without disabling irq.
611 		 */
612 		spin_lock(&kvm->mmu_lock);
613 		ptep = __find_linux_pte(kvm->arch.pgtable,
614 					gpa, NULL, &shift);
615 		if (ptep && pte_present(*ptep) &&
616 		    (!writing || pte_write(*ptep))) {
617 			kvmppc_radix_update_pte(kvm, ptep, 0, pgflags,
618 						gpa, shift);
619 			dsisr &= ~DSISR_SET_RC;
620 		}
621 		spin_unlock(&kvm->mmu_lock);
622 		if (!(dsisr & (DSISR_BAD_FAULT_64S | DSISR_NOHPTE |
623 			       DSISR_PROTFAULT | DSISR_SET_RC)))
624 			return RESUME_GUEST;
625 	}
626 
627 	/* used to check for invalidations in progress */
628 	mmu_seq = kvm->mmu_notifier_seq;
629 	smp_rmb();
630 
631 	/*
632 	 * Do a fast check first, since __gfn_to_pfn_memslot doesn't do one
633 	 * when called with !atomic && !async, which is how we call it.
634 	 * We always ask for write permission since the common case
635 	 * is that the page is writable.
636 	 */
637 	hva = gfn_to_hva_memslot(memslot, gfn);
638 	if (upgrade_p && __get_user_pages_fast(hva, 1, 1, &page) == 1) {
639 		pfn = page_to_pfn(page);
640 		upgrade_write = true;
641 	} else {
642 		/* Call KVM generic code to do the slow-path check */
643 		pfn = __gfn_to_pfn_memslot(memslot, gfn, false, NULL,
644 					   writing, upgrade_p);
645 		if (is_error_noslot_pfn(pfn))
646 			return -EFAULT;
647 		page = NULL;
648 		if (pfn_valid(pfn)) {
649 			page = pfn_to_page(pfn);
650 			if (PageReserved(page))
651 				page = NULL;
652 		}
653 	}
654 
655 	/* See if we can insert a 1GB or 2MB large PTE here */
656 	level = 0;
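	/* A large mapping needs gpa and hva congruent modulo the large page size */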
657 	if (page && PageCompound(page)) {
658 		pte_size = PAGE_SIZE << compound_order(compound_head(page));
659 		if (pte_size >= PUD_SIZE &&
660 		    (gpa & (PUD_SIZE - PAGE_SIZE)) ==
661 		    (hva & (PUD_SIZE - PAGE_SIZE))) {
662 			level = 2;
663 			pfn &= ~((PUD_SIZE >> PAGE_SHIFT) - 1);
664 		} else if (pte_size >= PMD_SIZE &&
665 			   (gpa & (PMD_SIZE - PAGE_SIZE)) ==
666 			   (hva & (PMD_SIZE - PAGE_SIZE))) {
667 			level = 1;
668 			pfn &= ~((PMD_SIZE >> PAGE_SHIFT) - 1);
669 		}
670 	}
671 
672 	/*
673 	 * Compute the PTE value that we need to insert.
674 	 */
675 	if (page) {
676 		pgflags = _PAGE_READ | _PAGE_EXEC | _PAGE_PRESENT | _PAGE_PTE |
677 			_PAGE_ACCESSED;
678 		if (writing || upgrade_write)
679 			pgflags |= _PAGE_WRITE | _PAGE_DIRTY;
680 		pte = pfn_pte(pfn, __pgprot(pgflags));
681 	} else {
682 		/*
683 		 * Read the PTE from the process' radix tree and use that
684 		 * so we get the attribute bits.
685 		 */
686 		local_irq_disable();
687 		ptep = __find_linux_pte(vcpu->arch.pgdir, hva, NULL, &shift);
688 		pte = *ptep;
689 		local_irq_enable();
690 		if (shift == PUD_SHIFT &&
691 		    (gpa & (PUD_SIZE - PAGE_SIZE)) ==
692 		    (hva & (PUD_SIZE - PAGE_SIZE))) {
693 			level = 2;
694 		} else if (shift == PMD_SHIFT &&
695 			   (gpa & (PMD_SIZE - PAGE_SIZE)) ==
696 			   (hva & (PMD_SIZE - PAGE_SIZE))) {
697 			level = 1;
698 		} else if (shift && shift != PAGE_SHIFT) {
699 			/* Adjust the PFN to the right sub-page of the host's large page */
700 			unsigned long mask = (1ul << shift) - PAGE_SIZE;
701 			pte = __pte(pte_val(pte) | (hva & mask));
702 		}
703 		pte = __pte(pte_val(pte) | _PAGE_EXEC | _PAGE_ACCESSED);
704 		if (writing || upgrade_write) {
705 			if (pte_val(pte) & _PAGE_WRITE)
706 				pte = __pte(pte_val(pte) | _PAGE_DIRTY);
707 		} else {
708 			pte = __pte(pte_val(pte) & ~(_PAGE_WRITE | _PAGE_DIRTY));
709 		}
710 	}
711 
712 	/* Allocate space in the tree and write the PTE */
713 	ret = kvmppc_create_pte(kvm, pte, gpa, level, mmu_seq);
714 
715 	if (page) {
716 		if (!ret && (pte_val(pte) & _PAGE_WRITE))
717 			set_page_dirty_lock(page);
718 		put_page(page);
719 	}
720 
721 	if (ret == 0 || ret == -EAGAIN)
722 		ret = RESUME_GUEST;
723 	return ret;
724 }
725 
726 /* Called with kvm->lock held */
727 int kvm_unmap_radix(struct kvm *kvm, struct kvm_memory_slot *memslot,
728 		    unsigned long gfn)
729 {
730 	pte_t *ptep;
731 	unsigned long gpa = gfn << PAGE_SHIFT;
732 	unsigned int shift;
733 	unsigned long old;
734 
735 	ptep = __find_linux_pte(kvm->arch.pgtable, gpa, NULL, &shift);
736 	if (ptep && pte_present(*ptep)) {
737 		old = kvmppc_radix_update_pte(kvm, ptep, ~0UL, 0,
738 					      gpa, shift);
739 		kvmppc_radix_tlbie_page(kvm, gpa, shift);
740 		if ((old & _PAGE_DIRTY) && memslot->dirty_bitmap) {
741 			unsigned long npages = 1;
742 			if (shift)
743 				npages = 1ul << (shift - PAGE_SHIFT);
744 			kvmppc_update_dirty_map(memslot, gfn, npages);
745 		}
746 	}
747 	return 0;
748 }
749 
750 /* Called with kvm->lock held */
751 int kvm_age_radix(struct kvm *kvm, struct kvm_memory_slot *memslot,
752 		  unsigned long gfn)
753 {
754 	pte_t *ptep;
755 	unsigned long gpa = gfn << PAGE_SHIFT;
756 	unsigned int shift;
757 	int ref = 0;
758 
759 	ptep = __find_linux_pte(kvm->arch.pgtable, gpa, NULL, &shift);
760 	if (ptep && pte_present(*ptep) && pte_young(*ptep)) {
761 		kvmppc_radix_update_pte(kvm, ptep, _PAGE_ACCESSED, 0,
762 					gpa, shift);
763 		/* XXX need to flush tlb here? */
764 		ref = 1;
765 	}
766 	return ref;
767 }
768 
769 /* Called with kvm->lock held */
770 int kvm_test_age_radix(struct kvm *kvm, struct kvm_memory_slot *memslot,
771 		       unsigned long gfn)
772 {
773 	pte_t *ptep;
774 	unsigned long gpa = gfn << PAGE_SHIFT;
775 	unsigned int shift;
776 	int ref = 0;
777 
778 	ptep = __find_linux_pte(kvm->arch.pgtable, gpa, NULL, &shift);
779 	if (ptep && pte_present(*ptep) && pte_young(*ptep))
780 		ref = 1;
781 	return ref;
782 }
783 
784 /* Returns the number of PAGE_SIZE pages that are dirty */
785 static int kvm_radix_test_clear_dirty(struct kvm *kvm,
786 				struct kvm_memory_slot *memslot, int pagenum)
787 {
788 	unsigned long gfn = memslot->base_gfn + pagenum;
789 	unsigned long gpa = gfn << PAGE_SHIFT;
790 	pte_t *ptep;
791 	unsigned int shift;
792 	int ret = 0;
793 
794 	ptep = __find_linux_pte(kvm->arch.pgtable, gpa, NULL, &shift);
795 	if (ptep && pte_present(*ptep) && pte_dirty(*ptep)) {
796 		ret = 1;
797 		if (shift)
798 			ret = 1 << (shift - PAGE_SHIFT);
799 		kvmppc_radix_update_pte(kvm, ptep, _PAGE_DIRTY, 0,
800 					gpa, shift);
801 		kvmppc_radix_tlbie_page(kvm, gpa, shift);
802 	}
803 	return ret;
804 }
805 
806 long kvmppc_hv_get_dirty_log_radix(struct kvm *kvm,
807 			struct kvm_memory_slot *memslot, unsigned long *map)
808 {
809 	unsigned long i, j;
810 	int npages;
811 
812 	for (i = 0; i < memslot->npages; i = j) {
813 		npages = kvm_radix_test_clear_dirty(kvm, memslot, i);
814 
815 		/*
816 		 * Note that if npages > 0 then i must be a multiple of npages,
817 		 * since huge pages are only used to back the guest at guest
818 		 * real addresses that are a multiple of their size.
819 		 * Since we have at most one PTE covering any given guest
820 		 * real address, if npages > 1 we can skip to i + npages.
821 		 */
822 		j = i + 1;
823 		if (npages) {
824 			set_dirty_bits(map, i, npages);
825 			j = i + npages;
826 		}
827 	}
828 	return 0;
829 }
830 
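/*
 * Each AP encoding reported to userspace has the log2 page size in its
 * low 6 bits and the AP field value in its top 3 bits.
 */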
831 static void add_rmmu_ap_encoding(struct kvm_ppc_rmmu_info *info,
832 				 int psize, int *indexp)
833 {
834 	if (!mmu_psize_defs[psize].shift)
835 		return;
836 	info->ap_encodings[*indexp] = mmu_psize_defs[psize].shift |
837 		(mmu_psize_defs[psize].ap << 29);
838 	++(*indexp);
839 }
840 
841 int kvmhv_get_rmmu_info(struct kvm *kvm, struct kvm_ppc_rmmu_info *info)
842 {
843 	int i;
844 
845 	if (!radix_enabled())
846 		return -EINVAL;
847 	memset(info, 0, sizeof(*info));
848 
849 	/* 4k page size */
850 	info->geometries[0].page_shift = 12;
851 	info->geometries[0].level_bits[0] = 9;
852 	for (i = 1; i < 4; ++i)
853 		info->geometries[0].level_bits[i] = p9_supported_radix_bits[i];
854 	/* 64k page size */
855 	info->geometries[1].page_shift = 16;
856 	for (i = 0; i < 4; ++i)
857 		info->geometries[1].level_bits[i] = p9_supported_radix_bits[i];
858 
859 	i = 0;
860 	add_rmmu_ap_encoding(info, MMU_PAGE_4K, &i);
861 	add_rmmu_ap_encoding(info, MMU_PAGE_64K, &i);
862 	add_rmmu_ap_encoding(info, MMU_PAGE_2M, &i);
863 	add_rmmu_ap_encoding(info, MMU_PAGE_1G, &i);
864 
865 	return 0;
866 }
867 
868 int kvmppc_init_vm_radix(struct kvm *kvm)
869 {
870 	kvm->arch.pgtable = pgd_alloc(kvm->mm);
871 	if (!kvm->arch.pgtable)
872 		return -ENOMEM;
873 	return 0;
874 }
875 
876 static void pte_ctor(void *addr)
877 {
878 	memset(addr, 0, RADIX_PTE_TABLE_SIZE);
879 }
880 
881 static void pmd_ctor(void *addr)
882 {
883 	memset(addr, 0, RADIX_PMD_TABLE_SIZE);
884 }
885 
886 int kvmppc_radix_init(void)
887 {
888 	unsigned long size = sizeof(void *) << RADIX_PTE_INDEX_SIZE;
889 
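	/* Each level's table must be naturally aligned, hence align == size */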
890 	kvm_pte_cache = kmem_cache_create("kvm-pte", size, size, 0, pte_ctor);
891 	if (!kvm_pte_cache)
892 		return -ENOMEM;
893 
894 	size = sizeof(void *) << RADIX_PMD_INDEX_SIZE;
895 
896 	kvm_pmd_cache = kmem_cache_create("kvm-pmd", size, size, 0, pmd_ctor);
897 	if (!kvm_pmd_cache) {
898 		kmem_cache_destroy(kvm_pte_cache);
899 		return -ENOMEM;
900 	}
901 
902 	return 0;
903 }
904 
905 void kvmppc_radix_exit(void)
906 {
907 	kmem_cache_destroy(kvm_pte_cache);
908 	kmem_cache_destroy(kvm_pmd_cache);
909 }
910