1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  *
4  * Copyright 2016 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
5  */
6 
7 #include <linux/types.h>
8 #include <linux/string.h>
9 #include <linux/kvm.h>
10 #include <linux/kvm_host.h>
11 #include <linux/anon_inodes.h>
12 #include <linux/file.h>
13 #include <linux/debugfs.h>
14 #include <linux/pgtable.h>
15 
16 #include <asm/kvm_ppc.h>
17 #include <asm/kvm_book3s.h>
18 #include "book3s_hv.h"
19 #include <asm/page.h>
20 #include <asm/mmu.h>
21 #include <asm/pgalloc.h>
22 #include <asm/pte-walk.h>
23 #include <asm/ultravisor.h>
24 #include <asm/kvm_book3s_uvmem.h>
25 #include <asm/plpar_wrappers.h>
26 #include <asm/firmware.h>
27 
28 /*
29  * Supported radix tree geometry.
30  * Like p9, we support either 5 or 9 bits at the first (lowest) level,
31  * for a page size of 64k or 4k.
32  */
33 static int p9_supported_radix_bits[4] = { 5, 9, 9, 13 };
34 
35 unsigned long __kvmhv_copy_tofrom_guest_radix(int lpid, int pid,
36 					      gva_t eaddr, void *to, void *from,
37 					      unsigned long n)
38 {
39 	int old_pid, old_lpid;
40 	unsigned long quadrant, ret = n;
41 	bool is_load = !!to;
42 
43 	/* Can't access quadrants 1 or 2 in non-HV mode, call the HV to do it */
44 	if (kvmhv_on_pseries())
45 		return plpar_hcall_norets(H_COPY_TOFROM_GUEST, lpid, pid, eaddr,
46 					  (to != NULL) ? __pa(to): 0,
47 					  (from != NULL) ? __pa(from): 0, n);
48 
49 	if (eaddr & (0xFFFUL << 52))
50 		return ret;
51 
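	/*
	 * Quadrant 1 effective addresses are translated with the guest's
	 * LPID and the given PID; quadrant 2 is used when pid == 0.
	 */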
52 	quadrant = 1;
53 	if (!pid)
54 		quadrant = 2;
55 	if (is_load)
56 		from = (void *) (eaddr | (quadrant << 62));
57 	else
58 		to = (void *) (eaddr | (quadrant << 62));
59 
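	/*
	 * Temporarily switch LPIDR (and, for quadrant 1, PIDR) so that the
	 * access through the quadrant address below is translated with the
	 * guest's page tables.
	 */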
60 	preempt_disable();
61 
62 	asm volatile("hwsync" ::: "memory");
63 	isync();
64 	/* switch the lpid first to avoid running host with unallocated pid */
65 	old_lpid = mfspr(SPRN_LPID);
66 	if (old_lpid != lpid)
67 		mtspr(SPRN_LPID, lpid);
68 	if (quadrant == 1) {
69 		old_pid = mfspr(SPRN_PID);
70 		if (old_pid != pid)
71 			mtspr(SPRN_PID, pid);
72 	}
73 	isync();
74 
75 	pagefault_disable();
76 	if (is_load)
77 		ret = __copy_from_user_inatomic(to, (const void __user *)from, n);
78 	else
79 		ret = __copy_to_user_inatomic((void __user *)to, from, n);
80 	pagefault_enable();
81 
82 	asm volatile("hwsync" ::: "memory");
83 	isync();
84 	/* switch the pid first to avoid running host with unallocated pid */
85 	if (quadrant == 1 && pid != old_pid)
86 		mtspr(SPRN_PID, old_pid);
87 	if (lpid != old_lpid)
88 		mtspr(SPRN_LPID, old_lpid);
89 	isync();
90 
91 	preempt_enable();
92 
93 	return ret;
94 }
95 
96 static long kvmhv_copy_tofrom_guest_radix(struct kvm_vcpu *vcpu, gva_t eaddr,
97 					  void *to, void *from, unsigned long n)
98 {
99 	int lpid = vcpu->kvm->arch.lpid;
100 	int pid = vcpu->arch.pid;
101 
102 	/* This would cause a data segment interrupt so don't allow the access */
103 	if (eaddr & (0x3FFUL << 52))
104 		return -EINVAL;
105 
106 	/* If this is a nested guest, use its shadow lpid for the translation */
107 	if (vcpu->arch.nested)
108 		lpid = vcpu->arch.nested->shadow_lpid;
109 
110 	/* If accessing quadrant 3 then pid is expected to be 0 */
111 	if (((eaddr >> 62) & 0x3) == 0x3)
112 		pid = 0;
113 
114 	eaddr &= ~(0xFFFUL << 52);
115 
116 	return __kvmhv_copy_tofrom_guest_radix(lpid, pid, eaddr, to, from, n);
117 }
118 
119 long kvmhv_copy_from_guest_radix(struct kvm_vcpu *vcpu, gva_t eaddr, void *to,
120 				 unsigned long n)
121 {
122 	long ret;
123 
124 	ret = kvmhv_copy_tofrom_guest_radix(vcpu, eaddr, to, NULL, n);
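	/*
	 * A non-zero return is the number of bytes that could not be copied;
	 * zero the uncopied tail so the caller never sees stale data.
	 */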
125 	if (ret > 0)
126 		memset(to + (n - ret), 0, ret);
127 
128 	return ret;
129 }
130 
131 long kvmhv_copy_to_guest_radix(struct kvm_vcpu *vcpu, gva_t eaddr, void *from,
132 			       unsigned long n)
133 {
134 	return kvmhv_copy_tofrom_guest_radix(vcpu, eaddr, NULL, from, n);
135 }
136 
137 int kvmppc_mmu_walk_radix_tree(struct kvm_vcpu *vcpu, gva_t eaddr,
138 			       struct kvmppc_pte *gpte, u64 root,
139 			       u64 *pte_ret_p)
140 {
141 	struct kvm *kvm = vcpu->kvm;
142 	int ret, level, ps;
143 	unsigned long rts, bits, offset, index;
144 	u64 pte, base, gpa;
145 	__be64 rpte;
146 
147 	rts = ((root & RTS1_MASK) >> (RTS1_SHIFT - 3)) |
148 		((root & RTS2_MASK) >> RTS2_SHIFT);
149 	bits = root & RPDS_MASK;
150 	base = root & RPDB_MASK;
151 
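	/* The RTS field encodes the translated address space size in bits, minus 31 */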
152 	offset = rts + 31;
153 
154 	/* Current implementations only support 52-bit space */
155 	if (offset != 52)
156 		return -EINVAL;
157 
158 	/* Walk each level of the radix tree */
159 	for (level = 3; level >= 0; --level) {
160 		u64 addr;
161 		/* Check that this level uses a supported number of bits */
162 		if (level && bits != p9_supported_radix_bits[level])
163 			return -EINVAL;
164 		if (level == 0 && !(bits == 5 || bits == 9))
165 			return -EINVAL;
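		/*
		 * Each level translates the next 'bits' most-significant
		 * address bits below 'offset'.
		 */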
166 		offset -= bits;
167 		index = (eaddr >> offset) & ((1UL << bits) - 1);
168 		/* Check that low bits of page table base are zero */
169 		if (base & ((1UL << (bits + 3)) - 1))
170 			return -EINVAL;
171 		/* Read the entry from guest memory */
172 		addr = base + (index * sizeof(rpte));
173 
174 		kvm_vcpu_srcu_read_lock(vcpu);
175 		ret = kvm_read_guest(kvm, addr, &rpte, sizeof(rpte));
176 		kvm_vcpu_srcu_read_unlock(vcpu);
177 		if (ret) {
178 			if (pte_ret_p)
179 				*pte_ret_p = addr;
180 			return ret;
181 		}
182 		pte = __be64_to_cpu(rpte);
183 		if (!(pte & _PAGE_PRESENT))
184 			return -ENOENT;
185 		/* Check if a leaf entry */
186 		if (pte & _PAGE_PTE)
187 			break;
188 		/* Get ready to walk the next level */
189 		base = pte & RPDB_MASK;
190 		bits = pte & RPDS_MASK;
191 	}
192 
193 	/* Need a leaf at lowest level; 512GB pages not supported */
194 	if (level < 0 || level == 3)
195 		return -EINVAL;
196 
197 	/* We found a valid leaf PTE */
198 	/* Offset is now log base 2 of the page size */
199 	gpa = pte & 0x01fffffffffff000ul;
200 	if (gpa & ((1ul << offset) - 1))
201 		return -EINVAL;
202 	gpa |= eaddr & ((1ul << offset) - 1);
203 	for (ps = MMU_PAGE_4K; ps < MMU_PAGE_COUNT; ++ps)
204 		if (offset == mmu_psize_defs[ps].shift)
205 			break;
206 	gpte->page_size = ps;
207 	gpte->page_shift = offset;
208 
209 	gpte->eaddr = eaddr;
210 	gpte->raddr = gpa;
211 
212 	/* Work out permissions */
213 	gpte->may_read = !!(pte & _PAGE_READ);
214 	gpte->may_write = !!(pte & _PAGE_WRITE);
215 	gpte->may_execute = !!(pte & _PAGE_EXEC);
216 
217 	gpte->rc = pte & (_PAGE_ACCESSED | _PAGE_DIRTY);
218 
219 	if (pte_ret_p)
220 		*pte_ret_p = pte;
221 
222 	return 0;
223 }
224 
225 /*
226  * Used to walk a partition or process table radix tree in guest memory
227  * Note: We exploit the fact that a partition table and a process
228  * table have the same layout, a partition-scoped page table and a
229  * process-scoped page table have the same layout, and the 2nd
230  * doubleword of a partition table entry has the same layout as
231  * the PTCR register.
232  */
233 int kvmppc_mmu_radix_translate_table(struct kvm_vcpu *vcpu, gva_t eaddr,
234 				     struct kvmppc_pte *gpte, u64 table,
235 				     int table_index, u64 *pte_ret_p)
236 {
237 	struct kvm *kvm = vcpu->kvm;
238 	int ret;
239 	unsigned long size, ptbl, root;
240 	struct prtb_entry entry;
241 
242 	if ((table & PRTS_MASK) > 24)
243 		return -EINVAL;
244 	size = 1ul << ((table & PRTS_MASK) + 12);
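	/* PRTS encodes the table size: the table occupies 2^(PRTS + 12) bytes */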
245 
246 	/* Is the table big enough to contain this entry? */
247 	if ((table_index * sizeof(entry)) >= size)
248 		return -EINVAL;
249 
250 	/* Read the table to find the root of the radix tree */
251 	ptbl = (table & PRTB_MASK) + (table_index * sizeof(entry));
252 	kvm_vcpu_srcu_read_lock(vcpu);
253 	ret = kvm_read_guest(kvm, ptbl, &entry, sizeof(entry));
254 	kvm_vcpu_srcu_read_unlock(vcpu);
255 	if (ret)
256 		return ret;
257 
258 	/* Root is stored in the first double word */
259 	root = be64_to_cpu(entry.prtb0);
260 
261 	return kvmppc_mmu_walk_radix_tree(vcpu, eaddr, gpte, root, pte_ret_p);
262 }
263 
264 int kvmppc_mmu_radix_xlate(struct kvm_vcpu *vcpu, gva_t eaddr,
265 			   struct kvmppc_pte *gpte, bool data, bool iswrite)
266 {
267 	u32 pid;
268 	u64 pte;
269 	int ret;
270 
271 	/* Work out effective PID */
272 	switch (eaddr >> 62) {
273 	case 0:
274 		pid = vcpu->arch.pid;
275 		break;
276 	case 3:
277 		pid = 0;
278 		break;
279 	default:
280 		return -EINVAL;
281 	}
282 
283 	ret = kvmppc_mmu_radix_translate_table(vcpu, eaddr, gpte,
284 				vcpu->kvm->arch.process_table, pid, &pte);
285 	if (ret)
286 		return ret;
287 
288 	/* Check privilege (applies only to process scoped translations) */
289 	if (kvmppc_get_msr(vcpu) & MSR_PR) {
290 		if (pte & _PAGE_PRIVILEGED) {
291 			gpte->may_read = 0;
292 			gpte->may_write = 0;
293 			gpte->may_execute = 0;
294 		}
295 	} else {
296 		if (!(pte & _PAGE_PRIVILEGED)) {
297 			/* Check AMR/IAMR to see if strict mode is in force */
298 			if (kvmppc_get_amr_hv(vcpu) & (1ul << 62))
299 				gpte->may_read = 0;
300 			if (kvmppc_get_amr_hv(vcpu) & (1ul << 63))
301 				gpte->may_write = 0;
302 			if (vcpu->arch.iamr & (1ul << 62))
303 				gpte->may_execute = 0;
304 		}
305 	}
306 
307 	return 0;
308 }
309 
310 void kvmppc_radix_tlbie_page(struct kvm *kvm, unsigned long addr,
311 			     unsigned int pshift, unsigned int lpid)
312 {
313 	unsigned long psize = PAGE_SIZE;
314 	int psi;
315 	long rc;
316 	unsigned long rb;
317 
318 	if (pshift)
319 		psize = 1UL << pshift;
320 	else
321 		pshift = PAGE_SHIFT;
322 
323 	addr &= ~(psize - 1);
324 
325 	if (!kvmhv_on_pseries()) {
326 		radix__flush_tlb_lpid_page(lpid, addr, psize);
327 		return;
328 	}
329 
330 	psi = shift_to_mmu_psize(pshift);
331 
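	/*
	 * We are a nested hypervisor: ask the L0 to do the invalidation,
	 * via H_RPT_INVALIDATE when the firmware supports it and via the
	 * H_TLB_INVALIDATE hcall otherwise.
	 */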
332 	if (!firmware_has_feature(FW_FEATURE_RPT_INVALIDATE)) {
333 		rb = addr | (mmu_get_ap(psi) << PPC_BITLSHIFT(58));
334 		rc = plpar_hcall_norets(H_TLB_INVALIDATE, H_TLBIE_P1_ENC(0, 0, 1),
335 					lpid, rb);
336 	} else {
337 		rc = pseries_rpt_invalidate(lpid, H_RPTI_TARGET_CMMU,
338 					    H_RPTI_TYPE_NESTED |
339 					    H_RPTI_TYPE_TLB,
340 					    psize_to_rpti_pgsize(psi),
341 					    addr, addr + psize);
342 	}
343 
344 	if (rc)
345 		pr_err("KVM: TLB page invalidation hcall failed, rc=%ld\n", rc);
346 }
347 
348 static void kvmppc_radix_flush_pwc(struct kvm *kvm, unsigned int lpid)
349 {
350 	long rc;
351 
352 	if (!kvmhv_on_pseries()) {
353 		radix__flush_pwc_lpid(lpid);
354 		return;
355 	}
356 
357 	if (!firmware_has_feature(FW_FEATURE_RPT_INVALIDATE))
358 		rc = plpar_hcall_norets(H_TLB_INVALIDATE, H_TLBIE_P1_ENC(1, 0, 1),
359 					lpid, TLBIEL_INVAL_SET_LPID);
360 	else
361 		rc = pseries_rpt_invalidate(lpid, H_RPTI_TARGET_CMMU,
362 					    H_RPTI_TYPE_NESTED |
363 					    H_RPTI_TYPE_PWC, H_RPTI_PAGE_ALL,
364 					    0, -1UL);
365 	if (rc)
366 		pr_err("KVM: TLB PWC invalidation hcall failed, rc=%ld\n", rc);
367 }
368 
369 static unsigned long kvmppc_radix_update_pte(struct kvm *kvm, pte_t *ptep,
370 				      unsigned long clr, unsigned long set,
371 				      unsigned long addr, unsigned int shift)
372 {
373 	return __radix_pte_update(ptep, clr, set);
374 }
375 
376 static void kvmppc_radix_set_pte_at(struct kvm *kvm, unsigned long addr,
377 			     pte_t *ptep, pte_t pte)
378 {
379 	radix__set_pte_at(kvm->mm, addr, ptep, pte, 0);
380 }
381 
382 static struct kmem_cache *kvm_pte_cache;
383 static struct kmem_cache *kvm_pmd_cache;
384 
385 static pte_t *kvmppc_pte_alloc(void)
386 {
387 	pte_t *pte;
388 
389 	pte = kmem_cache_alloc(kvm_pte_cache, GFP_KERNEL);
390 	/* pmd_populate() will only reference _pa(pte). */
391 	kmemleak_ignore(pte);
392 
393 	return pte;
394 }
395 
396 static void kvmppc_pte_free(pte_t *ptep)
397 {
398 	kmem_cache_free(kvm_pte_cache, ptep);
399 }
400 
401 static pmd_t *kvmppc_pmd_alloc(void)
402 {
403 	pmd_t *pmd;
404 
405 	pmd = kmem_cache_alloc(kvm_pmd_cache, GFP_KERNEL);
406 	/* pud_populate() will only reference _pa(pmd). */
407 	kmemleak_ignore(pmd);
408 
409 	return pmd;
410 }
411 
412 static void kvmppc_pmd_free(pmd_t *pmdp)
413 {
414 	kmem_cache_free(kvm_pmd_cache, pmdp);
415 }
416 
417 /* Called with kvm->mmu_lock held */
418 void kvmppc_unmap_pte(struct kvm *kvm, pte_t *pte, unsigned long gpa,
419 		      unsigned int shift,
420 		      const struct kvm_memory_slot *memslot,
421 		      unsigned int lpid)
423 {
424 	unsigned long old;
425 	unsigned long gfn = gpa >> PAGE_SHIFT;
426 	unsigned long page_size = PAGE_SIZE;
427 	unsigned long hpa;
428 
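	/*
	 * Clear the PTE and flush it from the partition-scoped TLB before
	 * doing any bookkeeping based on the old value.
	 */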
429 	old = kvmppc_radix_update_pte(kvm, pte, ~0UL, 0, gpa, shift);
430 	kvmppc_radix_tlbie_page(kvm, gpa, shift, lpid);
431 
432 	/* The following only applies to L1 entries */
433 	if (lpid != kvm->arch.lpid)
434 		return;
435 
436 	if (!memslot) {
437 		memslot = gfn_to_memslot(kvm, gfn);
438 		if (!memslot)
439 			return;
440 	}
441 	if (shift) { /* 1GB or 2MB page */
442 		page_size = 1ul << shift;
443 		if (shift == PMD_SHIFT)
444 			kvm->stat.num_2M_pages--;
445 		else if (shift == PUD_SHIFT)
446 			kvm->stat.num_1G_pages--;
447 	}
448 
449 	gpa &= ~(page_size - 1);
450 	hpa = old & PTE_RPN_MASK;
451 	kvmhv_remove_nest_rmap_range(kvm, memslot, gpa, hpa, page_size);
452 
453 	if ((old & _PAGE_DIRTY) && memslot->dirty_bitmap)
454 		kvmppc_update_dirty_map(memslot, gfn, page_size);
455 }
456 
457 /*
458  * kvmppc_unmap_free_p?d free existing page tables by recursively
459  * descending, clearing entries and freeing child tables.
460  * Callers are responsible for flushing the PWC.
461  *
462  * When page tables are being unmapped/freed as part of page fault path
463  * (full == false), valid ptes are generally not expected; however, there
464  * is one situation where they arise, which is when dirty page logging is
465  * turned off for a memslot while the VM is running.  The new memslot
466  * becomes visible to page faults before the memslot commit function
467  * gets to flush the memslot, which can lead to a 2MB page mapping being
468  * installed for a guest physical address where there are already 64kB
469  * (or 4kB) mappings (of sub-pages of the same 2MB page).
470  */
471 static void kvmppc_unmap_free_pte(struct kvm *kvm, pte_t *pte, bool full,
472 				  unsigned int lpid)
473 {
474 	if (full) {
475 		memset(pte, 0, sizeof(long) << RADIX_PTE_INDEX_SIZE);
476 	} else {
477 		pte_t *p = pte;
478 		unsigned long it;
479 
480 		for (it = 0; it < PTRS_PER_PTE; ++it, ++p) {
481 			if (pte_val(*p) == 0)
482 				continue;
483 			kvmppc_unmap_pte(kvm, p,
484 					 pte_pfn(*p) << PAGE_SHIFT,
485 					 PAGE_SHIFT, NULL, lpid);
486 		}
487 	}
488 
489 	kvmppc_pte_free(pte);
490 }
491 
492 static void kvmppc_unmap_free_pmd(struct kvm *kvm, pmd_t *pmd, bool full,
493 				  unsigned int lpid)
494 {
495 	unsigned long im;
496 	pmd_t *p = pmd;
497 
498 	for (im = 0; im < PTRS_PER_PMD; ++im, ++p) {
499 		if (!pmd_present(*p))
500 			continue;
501 		if (pmd_is_leaf(*p)) {
502 			if (full) {
503 				pmd_clear(p);
504 			} else {
505 				WARN_ON_ONCE(1);
506 				kvmppc_unmap_pte(kvm, (pte_t *)p,
507 					 pte_pfn(*(pte_t *)p) << PAGE_SHIFT,
508 					 PMD_SHIFT, NULL, lpid);
509 			}
510 		} else {
511 			pte_t *pte;
512 
513 			pte = pte_offset_kernel(p, 0);
514 			kvmppc_unmap_free_pte(kvm, pte, full, lpid);
515 			pmd_clear(p);
516 		}
517 	}
518 	kvmppc_pmd_free(pmd);
519 }
520 
521 static void kvmppc_unmap_free_pud(struct kvm *kvm, pud_t *pud,
522 				  unsigned int lpid)
523 {
524 	unsigned long iu;
525 	pud_t *p = pud;
526 
527 	for (iu = 0; iu < PTRS_PER_PUD; ++iu, ++p) {
528 		if (!pud_present(*p))
529 			continue;
530 		if (pud_is_leaf(*p)) {
531 			pud_clear(p);
532 		} else {
533 			pmd_t *pmd;
534 
535 			pmd = pmd_offset(p, 0);
536 			kvmppc_unmap_free_pmd(kvm, pmd, true, lpid);
537 			pud_clear(p);
538 		}
539 	}
540 	pud_free(kvm->mm, pud);
541 }
542 
543 void kvmppc_free_pgtable_radix(struct kvm *kvm, pgd_t *pgd, unsigned int lpid)
544 {
545 	unsigned long ig;
546 
547 	for (ig = 0; ig < PTRS_PER_PGD; ++ig, ++pgd) {
548 		p4d_t *p4d = p4d_offset(pgd, 0);
549 		pud_t *pud;
550 
551 		if (!p4d_present(*p4d))
552 			continue;
553 		pud = pud_offset(p4d, 0);
554 		kvmppc_unmap_free_pud(kvm, pud, lpid);
555 		p4d_clear(p4d);
556 	}
557 }
558 
559 void kvmppc_free_radix(struct kvm *kvm)
560 {
561 	if (kvm->arch.pgtable) {
562 		kvmppc_free_pgtable_radix(kvm, kvm->arch.pgtable,
563 					  kvm->arch.lpid);
564 		pgd_free(kvm->mm, kvm->arch.pgtable);
565 		kvm->arch.pgtable = NULL;
566 	}
567 }
568 
569 static void kvmppc_unmap_free_pmd_entry_table(struct kvm *kvm, pmd_t *pmd,
570 					unsigned long gpa, unsigned int lpid)
571 {
572 	pte_t *pte = pte_offset_kernel(pmd, 0);
573 
574 	/*
575 	 * Clearing the pmd entry and then flushing the PWC ensures that the
576 	 * pte page will no longer be cached by the MMU, so it can be freed
577 	 * without flushing the PWC again.
578 	 */
579 	pmd_clear(pmd);
580 	kvmppc_radix_flush_pwc(kvm, lpid);
581 
582 	kvmppc_unmap_free_pte(kvm, pte, false, lpid);
583 }
584 
585 static void kvmppc_unmap_free_pud_entry_table(struct kvm *kvm, pud_t *pud,
586 					unsigned long gpa, unsigned int lpid)
587 {
588 	pmd_t *pmd = pmd_offset(pud, 0);
589 
590 	/*
591 	 * Clearing the pud entry and then flushing the PWC ensures that the
592 	 * pmd page and any child pte pages will no longer be cached by the
593 	 * MMU, so they can be freed without flushing the PWC again.
594 	 */
595 	pud_clear(pud);
596 	kvmppc_radix_flush_pwc(kvm, lpid);
597 
598 	kvmppc_unmap_free_pmd(kvm, pmd, false, lpid);
599 }
600 
601 /*
602  * There are a number of bits which may differ between different faults on
603  * the same partition-scoped entry: the RC bits, which change in the course
604  * of cleaning and aging, and the write bit, which can change either because
605  * the access was upgraded or because a read fault raced with a write fault
606  * that set those bits first.
607  */
608 #define PTE_BITS_MUST_MATCH (~(_PAGE_WRITE | _PAGE_DIRTY | _PAGE_ACCESSED))
609 
610 int kvmppc_create_pte(struct kvm *kvm, pgd_t *pgtable, pte_t pte,
611 		      unsigned long gpa, unsigned int level,
612 		      unsigned long mmu_seq, unsigned int lpid,
613 		      unsigned long *rmapp, struct rmap_nested **n_rmap)
614 {
615 	pgd_t *pgd;
616 	p4d_t *p4d;
617 	pud_t *pud, *new_pud = NULL;
618 	pmd_t *pmd, *new_pmd = NULL;
619 	pte_t *ptep, *new_ptep = NULL;
620 	int ret;
621 
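	/*
	 * level 0 installs a regular PTE, level 1 a 2MB leaf entry at the
	 * PMD level, and level 2 a 1GB leaf entry at the PUD level.
	 */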
622 	/* Traverse the guest's 2nd-level tree, allocate new levels needed */
623 	pgd = pgtable + pgd_index(gpa);
624 	p4d = p4d_offset(pgd, gpa);
625 
626 	pud = NULL;
627 	if (p4d_present(*p4d))
628 		pud = pud_offset(p4d, gpa);
629 	else
630 		new_pud = pud_alloc_one(kvm->mm, gpa);
631 
632 	pmd = NULL;
633 	if (pud && pud_present(*pud) && !pud_is_leaf(*pud))
634 		pmd = pmd_offset(pud, gpa);
635 	else if (level <= 1)
636 		new_pmd = kvmppc_pmd_alloc();
637 
638 	if (level == 0 && !(pmd && pmd_present(*pmd) && !pmd_is_leaf(*pmd)))
639 		new_ptep = kvmppc_pte_alloc();
640 
641 	/* Check if we might have been invalidated; let the guest retry if so */
642 	spin_lock(&kvm->mmu_lock);
643 	ret = -EAGAIN;
644 	if (mmu_invalidate_retry(kvm, mmu_seq))
645 		goto out_unlock;
646 
647 	/* Now traverse again under the lock and change the tree */
648 	ret = -ENOMEM;
649 	if (p4d_none(*p4d)) {
650 		if (!new_pud)
651 			goto out_unlock;
652 		p4d_populate(kvm->mm, p4d, new_pud);
653 		new_pud = NULL;
654 	}
655 	pud = pud_offset(p4d, gpa);
656 	if (pud_is_leaf(*pud)) {
657 		unsigned long hgpa = gpa & PUD_MASK;
658 
659 		/* Check if we raced and someone else has set the same thing */
660 		if (level == 2) {
661 			if (pud_raw(*pud) == pte_raw(pte)) {
662 				ret = 0;
663 				goto out_unlock;
664 			}
665 			/* Valid 1GB page here already, add our extra bits */
666 			WARN_ON_ONCE((pud_val(*pud) ^ pte_val(pte)) &
667 							PTE_BITS_MUST_MATCH);
668 			kvmppc_radix_update_pte(kvm, (pte_t *)pud,
669 					      0, pte_val(pte), hgpa, PUD_SHIFT);
670 			ret = 0;
671 			goto out_unlock;
672 		}
673 		/*
674 		 * If we raced with another CPU which has just put
675 		 * a 1GB pte in after we saw a pmd page, try again.
676 		 */
677 		if (!new_pmd) {
678 			ret = -EAGAIN;
679 			goto out_unlock;
680 		}
681 		/* Valid 1GB page here already, remove it */
682 		kvmppc_unmap_pte(kvm, (pte_t *)pud, hgpa, PUD_SHIFT, NULL,
683 				 lpid);
684 	}
685 	if (level == 2) {
686 		if (!pud_none(*pud)) {
687 			/*
688 			 * There's a page table page here, but we wanted to
689 			 * install a large page, so remove and free the page
690 			 * table page.
691 			 */
692 			kvmppc_unmap_free_pud_entry_table(kvm, pud, gpa, lpid);
693 		}
694 		kvmppc_radix_set_pte_at(kvm, gpa, (pte_t *)pud, pte);
695 		if (rmapp && n_rmap)
696 			kvmhv_insert_nest_rmap(kvm, rmapp, n_rmap);
697 		ret = 0;
698 		goto out_unlock;
699 	}
700 	if (pud_none(*pud)) {
701 		if (!new_pmd)
702 			goto out_unlock;
703 		pud_populate(kvm->mm, pud, new_pmd);
704 		new_pmd = NULL;
705 	}
706 	pmd = pmd_offset(pud, gpa);
707 	if (pmd_is_leaf(*pmd)) {
708 		unsigned long lgpa = gpa & PMD_MASK;
709 
710 		/* Check if we raced and someone else has set the same thing */
711 		if (level == 1) {
712 			if (pmd_raw(*pmd) == pte_raw(pte)) {
713 				ret = 0;
714 				goto out_unlock;
715 			}
716 			/* Valid 2MB page here already, add our extra bits */
717 			WARN_ON_ONCE((pmd_val(*pmd) ^ pte_val(pte)) &
718 							PTE_BITS_MUST_MATCH);
719 			kvmppc_radix_update_pte(kvm, pmdp_ptep(pmd),
720 					0, pte_val(pte), lgpa, PMD_SHIFT);
721 			ret = 0;
722 			goto out_unlock;
723 		}
724 
725 		/*
726 		 * If we raced with another CPU which has just put
727 		 * a 2MB pte in after we saw a pte page, try again.
728 		 */
729 		if (!new_ptep) {
730 			ret = -EAGAIN;
731 			goto out_unlock;
732 		}
733 		/* Valid 2MB page here already, remove it */
734 		kvmppc_unmap_pte(kvm, pmdp_ptep(pmd), lgpa, PMD_SHIFT, NULL,
735 				 lpid);
736 	}
737 	if (level == 1) {
738 		if (!pmd_none(*pmd)) {
739 			/*
740 			 * There's a page table page here, but we wanted to
741 			 * install a large page, so remove and free the page
742 			 * table page.
743 			 */
744 			kvmppc_unmap_free_pmd_entry_table(kvm, pmd, gpa, lpid);
745 		}
746 		kvmppc_radix_set_pte_at(kvm, gpa, pmdp_ptep(pmd), pte);
747 		if (rmapp && n_rmap)
748 			kvmhv_insert_nest_rmap(kvm, rmapp, n_rmap);
749 		ret = 0;
750 		goto out_unlock;
751 	}
752 	if (pmd_none(*pmd)) {
753 		if (!new_ptep)
754 			goto out_unlock;
755 		pmd_populate(kvm->mm, pmd, new_ptep);
756 		new_ptep = NULL;
757 	}
758 	ptep = pte_offset_kernel(pmd, gpa);
759 	if (pte_present(*ptep)) {
760 		/* Check if someone else set the same thing */
761 		if (pte_raw(*ptep) == pte_raw(pte)) {
762 			ret = 0;
763 			goto out_unlock;
764 		}
765 		/* Valid page here already, add our extra bits */
766 		WARN_ON_ONCE((pte_val(*ptep) ^ pte_val(pte)) &
767 							PTE_BITS_MUST_MATCH);
768 		kvmppc_radix_update_pte(kvm, ptep, 0, pte_val(pte), gpa, 0);
769 		ret = 0;
770 		goto out_unlock;
771 	}
772 	kvmppc_radix_set_pte_at(kvm, gpa, ptep, pte);
773 	if (rmapp && n_rmap)
774 		kvmhv_insert_nest_rmap(kvm, rmapp, n_rmap);
775 	ret = 0;
776 
777  out_unlock:
778 	spin_unlock(&kvm->mmu_lock);
779 	if (new_pud)
780 		pud_free(kvm->mm, new_pud);
781 	if (new_pmd)
782 		kvmppc_pmd_free(new_pmd);
783 	if (new_ptep)
784 		kvmppc_pte_free(new_ptep);
785 	return ret;
786 }
787 
788 bool kvmppc_hv_handle_set_rc(struct kvm *kvm, bool nested, bool writing,
789 			     unsigned long gpa, unsigned int lpid)
790 {
791 	unsigned long pgflags;
792 	unsigned int shift;
793 	pte_t *ptep;
794 
795 	/*
796 	 * Need to set an R or C bit in the 2nd-level tables;
797 	 * since we are just helping out the hardware here,
798 	 * it is sufficient to do what the hardware does.
799 	 */
800 	pgflags = _PAGE_ACCESSED;
801 	if (writing)
802 		pgflags |= _PAGE_DIRTY;
803 
804 	if (nested)
805 		ptep = find_kvm_nested_guest_pte(kvm, lpid, gpa, &shift);
806 	else
807 		ptep = find_kvm_secondary_pte(kvm, gpa, &shift);
808 
809 	if (ptep && pte_present(*ptep) && (!writing || pte_write(*ptep))) {
810 		kvmppc_radix_update_pte(kvm, ptep, 0, pgflags, gpa, shift);
811 		return true;
812 	}
813 	return false;
814 }
815 
816 int kvmppc_book3s_instantiate_page(struct kvm_vcpu *vcpu,
817 				   unsigned long gpa,
818 				   struct kvm_memory_slot *memslot,
819 				   bool writing, bool kvm_ro,
820 				   pte_t *inserted_pte, unsigned int *levelp)
821 {
822 	struct kvm *kvm = vcpu->kvm;
823 	struct page *page = NULL;
824 	unsigned long mmu_seq;
825 	unsigned long hva, gfn = gpa >> PAGE_SHIFT;
826 	bool upgrade_write = false;
827 	bool *upgrade_p = &upgrade_write;
828 	pte_t pte, *ptep;
829 	unsigned int shift, level;
830 	int ret;
831 	bool large_enable;
832 
833 	/* used to check for invalidations in progress */
834 	mmu_seq = kvm->mmu_invalidate_seq;
835 	smp_rmb();
836 
837 	/*
838 	 * Do a fast check first, since __gfn_to_pfn_memslot doesn't
839 	 * do it with !atomic && !async, which is how we call it.
840 	 * We always ask for write permission since the common case
841 	 * is that the page is writable.
842 	 */
843 	hva = gfn_to_hva_memslot(memslot, gfn);
844 	if (!kvm_ro && get_user_page_fast_only(hva, FOLL_WRITE, &page)) {
845 		upgrade_write = true;
846 	} else {
847 		unsigned long pfn;
848 
849 		/* Call KVM generic code to do the slow-path check */
850 		pfn = __gfn_to_pfn_memslot(memslot, gfn, false, false, NULL,
851 					   writing, upgrade_p, NULL);
852 		if (is_error_noslot_pfn(pfn))
853 			return -EFAULT;
854 		page = NULL;
855 		if (pfn_valid(pfn)) {
856 			page = pfn_to_page(pfn);
857 			if (PageReserved(page))
858 				page = NULL;
859 		}
860 	}
861 
862 	/*
863 	 * Read the PTE from the process' radix tree and use that
864 	 * so we get the shift and attribute bits.
865 	 */
866 	spin_lock(&kvm->mmu_lock);
867 	ptep = find_kvm_host_pte(kvm, mmu_seq, hva, &shift);
868 	pte = __pte(0);
869 	if (ptep)
870 		pte = READ_ONCE(*ptep);
871 	spin_unlock(&kvm->mmu_lock);
872 	/*
873 	 * If the PTE disappeared temporarily due to a THP
874 	 * collapse, just return and let the guest try again.
875 	 */
876 	if (!pte_present(pte)) {
877 		if (page)
878 			put_page(page);
879 		return RESUME_GUEST;
880 	}
881 
882 	/* If we're logging dirty pages, always map single pages */
883 	large_enable = !(memslot->flags & KVM_MEM_LOG_DIRTY_PAGES);
884 
885 	/* Get pte level from shift/size */
886 	if (large_enable && shift == PUD_SHIFT &&
887 	    (gpa & (PUD_SIZE - PAGE_SIZE)) ==
888 	    (hva & (PUD_SIZE - PAGE_SIZE))) {
889 		level = 2;
890 	} else if (large_enable && shift == PMD_SHIFT &&
891 		   (gpa & (PMD_SIZE - PAGE_SIZE)) ==
892 		   (hva & (PMD_SIZE - PAGE_SIZE))) {
893 		level = 1;
894 	} else {
895 		level = 0;
896 		if (shift > PAGE_SHIFT) {
897 			/*
898 			 * If the pte maps more than one page, bring over
899 			 * bits from the virtual address to get the real
900 			 * address of the specific single page we want.
901 			 */
902 			unsigned long rpnmask = (1ul << shift) - PAGE_SIZE;
903 			pte = __pte(pte_val(pte) | (hva & rpnmask));
904 		}
905 	}
906 
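	/*
	 * Map the page executable and accessed. For a store, or when we
	 * could upgrade to write access, pre-set the dirty bit (if the host
	 * pte allows writing); otherwise drop write permission so that a
	 * later store will fault.
	 */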
907 	pte = __pte(pte_val(pte) | _PAGE_EXEC | _PAGE_ACCESSED);
908 	if (writing || upgrade_write) {
909 		if (pte_val(pte) & _PAGE_WRITE)
910 			pte = __pte(pte_val(pte) | _PAGE_DIRTY);
911 	} else {
912 		pte = __pte(pte_val(pte) & ~(_PAGE_WRITE | _PAGE_DIRTY));
913 	}
914 
915 	/* Allocate space in the tree and write the PTE */
916 	ret = kvmppc_create_pte(kvm, kvm->arch.pgtable, pte, gpa, level,
917 				mmu_seq, kvm->arch.lpid, NULL, NULL);
918 	if (inserted_pte)
919 		*inserted_pte = pte;
920 	if (levelp)
921 		*levelp = level;
922 
923 	if (page) {
924 		if (!ret && (pte_val(pte) & _PAGE_WRITE))
925 			set_page_dirty_lock(page);
926 		put_page(page);
927 	}
928 
929 	/* Increment number of large pages if we (successfully) inserted one */
930 	if (!ret) {
931 		if (level == 1)
932 			kvm->stat.num_2M_pages++;
933 		else if (level == 2)
934 			kvm->stat.num_1G_pages++;
935 	}
936 
937 	return ret;
938 }
939 
940 int kvmppc_book3s_radix_page_fault(struct kvm_vcpu *vcpu,
941 				   unsigned long ea, unsigned long dsisr)
942 {
943 	struct kvm *kvm = vcpu->kvm;
944 	unsigned long gpa, gfn;
945 	struct kvm_memory_slot *memslot;
946 	long ret;
947 	bool writing = !!(dsisr & DSISR_ISSTORE);
948 	bool kvm_ro = false;
949 
950 	/* Check for unusual errors */
951 	if (dsisr & DSISR_UNSUPP_MMU) {
952 		pr_err("KVM: Got unsupported MMU fault\n");
953 		return -EFAULT;
954 	}
955 	if (dsisr & DSISR_BADACCESS) {
956 		/* Reflect to the guest as DSI */
957 		pr_err("KVM: Got radix HV page fault with DSISR=%lx\n", dsisr);
958 		kvmppc_core_queue_data_storage(vcpu,
959 				kvmppc_get_msr(vcpu) & SRR1_PREFIXED,
960 				ea, dsisr);
961 		return RESUME_GUEST;
962 	}
963 
964 	/* Translate the logical address */
965 	gpa = vcpu->arch.fault_gpa & ~0xfffUL;
966 	gpa &= ~0xF000000000000000ul;
967 	gfn = gpa >> PAGE_SHIFT;
968 	if (!(dsisr & DSISR_PRTABLE_FAULT))
969 		gpa |= ea & 0xfff;
970 
971 	if (kvm->arch.secure_guest & KVMPPC_SECURE_INIT_DONE)
972 		return kvmppc_send_page_to_uv(kvm, gfn);
973 
974 	/* Get the corresponding memslot */
975 	memslot = gfn_to_memslot(kvm, gfn);
976 
977 	/* No memslot means it's an emulated MMIO region */
978 	if (!memslot || (memslot->flags & KVM_MEMSLOT_INVALID)) {
979 		if (dsisr & (DSISR_PRTABLE_FAULT | DSISR_BADACCESS |
980 			     DSISR_SET_RC)) {
981 			/*
982 			 * Bad address in guest page table tree, or other
983 			 * unusual error - reflect it to the guest as DSI.
984 			 */
985 			kvmppc_core_queue_data_storage(vcpu,
986 					kvmppc_get_msr(vcpu) & SRR1_PREFIXED,
987 					ea, dsisr);
988 			return RESUME_GUEST;
989 		}
990 		return kvmppc_hv_emulate_mmio(vcpu, gpa, ea, writing);
991 	}
992 
993 	if (memslot->flags & KVM_MEM_READONLY) {
994 		if (writing) {
995 			/* give the guest a DSI */
996 			kvmppc_core_queue_data_storage(vcpu,
997 					kvmppc_get_msr(vcpu) & SRR1_PREFIXED,
998 					ea, DSISR_ISSTORE | DSISR_PROTFAULT);
999 			return RESUME_GUEST;
1000 		}
1001 		kvm_ro = true;
1002 	}
1003 
1004 	/* Failed to set the reference/change bits */
1005 	if (dsisr & DSISR_SET_RC) {
1006 		spin_lock(&kvm->mmu_lock);
1007 		if (kvmppc_hv_handle_set_rc(kvm, false, writing,
1008 					    gpa, kvm->arch.lpid))
1009 			dsisr &= ~DSISR_SET_RC;
1010 		spin_unlock(&kvm->mmu_lock);
1011 
1012 		if (!(dsisr & (DSISR_BAD_FAULT_64S | DSISR_NOHPTE |
1013 			       DSISR_PROTFAULT | DSISR_SET_RC)))
1014 			return RESUME_GUEST;
1015 	}
1016 
1017 	/* Try to insert a pte */
1018 	ret = kvmppc_book3s_instantiate_page(vcpu, gpa, memslot, writing,
1019 					     kvm_ro, NULL, NULL);
1020 
1021 	if (ret == 0 || ret == -EAGAIN)
1022 		ret = RESUME_GUEST;
1023 	return ret;
1024 }
1025 
1026 /* Called with kvm->mmu_lock held */
1027 void kvm_unmap_radix(struct kvm *kvm, struct kvm_memory_slot *memslot,
1028 		     unsigned long gfn)
1029 {
1030 	pte_t *ptep;
1031 	unsigned long gpa = gfn << PAGE_SHIFT;
1032 	unsigned int shift;
1033 
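	/*
	 * For a secure guest, ask the ultravisor to invalidate the page
	 * rather than touching the partition-scoped tree ourselves.
	 */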
1034 	if (kvm->arch.secure_guest & KVMPPC_SECURE_INIT_DONE) {
1035 		uv_page_inval(kvm->arch.lpid, gpa, PAGE_SHIFT);
1036 		return;
1037 	}
1038 
1039 	ptep = find_kvm_secondary_pte(kvm, gpa, &shift);
1040 	if (ptep && pte_present(*ptep))
1041 		kvmppc_unmap_pte(kvm, ptep, gpa, shift, memslot,
1042 				 kvm->arch.lpid);
1043 }
1044 
1045 /* Called with kvm->mmu_lock held */
1046 bool kvm_age_radix(struct kvm *kvm, struct kvm_memory_slot *memslot,
1047 		   unsigned long gfn)
1048 {
1049 	pte_t *ptep;
1050 	unsigned long gpa = gfn << PAGE_SHIFT;
1051 	unsigned int shift;
1052 	bool ref = false;
1053 	unsigned long old, *rmapp;
1054 
1055 	if (kvm->arch.secure_guest & KVMPPC_SECURE_INIT_DONE)
1056 		return ref;
1057 
1058 	ptep = find_kvm_secondary_pte(kvm, gpa, &shift);
1059 	if (ptep && pte_present(*ptep) && pte_young(*ptep)) {
1060 		old = kvmppc_radix_update_pte(kvm, ptep, _PAGE_ACCESSED, 0,
1061 					      gpa, shift);
1062 		/* XXX need to flush tlb here? */
1063 		/* Also clear bit in ptes in shadow pgtable for nested guests */
1064 		rmapp = &memslot->arch.rmap[gfn - memslot->base_gfn];
1065 		kvmhv_update_nest_rmap_rc_list(kvm, rmapp, _PAGE_ACCESSED, 0,
1066 					       old & PTE_RPN_MASK,
1067 					       1UL << shift);
1068 		ref = true;
1069 	}
1070 	return ref;
1071 }
1072 
1073 /* Called with kvm->mmu_lock held */
1074 bool kvm_test_age_radix(struct kvm *kvm, struct kvm_memory_slot *memslot,
1075 			unsigned long gfn)
1077 {
1078 	pte_t *ptep;
1079 	unsigned long gpa = gfn << PAGE_SHIFT;
1080 	unsigned int shift;
1081 	bool ref = false;
1082 
1083 	if (kvm->arch.secure_guest & KVMPPC_SECURE_INIT_DONE)
1084 		return ref;
1085 
1086 	ptep = find_kvm_secondary_pte(kvm, gpa, &shift);
1087 	if (ptep && pte_present(*ptep) && pte_young(*ptep))
1088 		ref = true;
1089 	return ref;
1090 }
1091 
1092 /* Returns the number of PAGE_SIZE pages that are dirty */
1093 static int kvm_radix_test_clear_dirty(struct kvm *kvm,
1094 				struct kvm_memory_slot *memslot, int pagenum)
1095 {
1096 	unsigned long gfn = memslot->base_gfn + pagenum;
1097 	unsigned long gpa = gfn << PAGE_SHIFT;
1098 	pte_t *ptep, pte;
1099 	unsigned int shift;
1100 	int ret = 0;
1101 	unsigned long old, *rmapp;
1102 
1103 	if (kvm->arch.secure_guest & KVMPPC_SECURE_INIT_DONE)
1104 		return ret;
1105 
1106 	/*
1107 	 * For performance reasons we don't hold kvm->mmu_lock while walking the
1108 	 * partition scoped table.
1109 	 */
1110 	ptep = find_kvm_secondary_pte_unlocked(kvm, gpa, &shift);
1111 	if (!ptep)
1112 		return 0;
1113 
1114 	pte = READ_ONCE(*ptep);
1115 	if (pte_present(pte) && pte_dirty(pte)) {
1116 		spin_lock(&kvm->mmu_lock);
1117 		/*
1118 		 * Recheck the pte now that we hold kvm->mmu_lock
1119 		 */
1120 		if (pte_val(pte) != pte_val(*ptep)) {
1121 			/*
1122 			 * We have KVM_MEM_LOG_DIRTY_PAGES enabled. Hence we can
1123 			 * only find PAGE_SIZE pte entries here. We can continue
1124 			 * to use the pte address returned by the page table
1125 			 * walk above.
1126 			 */
1127 			if (!pte_present(*ptep) || !pte_dirty(*ptep)) {
1128 				spin_unlock(&kvm->mmu_lock);
1129 				return 0;
1130 			}
1131 		}
1132 
1133 		ret = 1;
1134 		VM_BUG_ON(shift);
1135 		old = kvmppc_radix_update_pte(kvm, ptep, _PAGE_DIRTY, 0,
1136 					      gpa, shift);
1137 		kvmppc_radix_tlbie_page(kvm, gpa, shift, kvm->arch.lpid);
1138 		/* Also clear bit in ptes in shadow pgtable for nested guests */
1139 		rmapp = &memslot->arch.rmap[gfn - memslot->base_gfn];
1140 		kvmhv_update_nest_rmap_rc_list(kvm, rmapp, _PAGE_DIRTY, 0,
1141 					       old & PTE_RPN_MASK,
1142 					       1UL << shift);
1143 		spin_unlock(&kvm->mmu_lock);
1144 	}
1145 	return ret;
1146 }
1147 
1148 long kvmppc_hv_get_dirty_log_radix(struct kvm *kvm,
1149 			struct kvm_memory_slot *memslot, unsigned long *map)
1150 {
1151 	unsigned long i, j;
1152 	int npages;
1153 
1154 	for (i = 0; i < memslot->npages; i = j) {
1155 		npages = kvm_radix_test_clear_dirty(kvm, memslot, i);
1156 
1157 		/*
1158 		 * Note that if npages > 0 then i must be a multiple of npages,
1159 		 * since huge pages are only used to back the guest at guest
1160 		 * real addresses that are a multiple of their size.
1161 		 * Since we have at most one PTE covering any given guest
1162 		 * real address, if npages > 1 we can skip to i + npages.
1163 		 */
1164 		j = i + 1;
1165 		if (npages) {
1166 			set_dirty_bits(map, i, npages);
1167 			j = i + npages;
1168 		}
1169 	}
1170 	return 0;
1171 }
1172 
1173 void kvmppc_radix_flush_memslot(struct kvm *kvm,
1174 				const struct kvm_memory_slot *memslot)
1175 {
1176 	unsigned long n;
1177 	pte_t *ptep;
1178 	unsigned long gpa;
1179 	unsigned int shift;
1180 
1181 	if (kvm->arch.secure_guest & KVMPPC_SECURE_INIT_START)
1182 		kvmppc_uvmem_drop_pages(memslot, kvm, true);
1183 
1184 	if (kvm->arch.secure_guest & KVMPPC_SECURE_INIT_DONE)
1185 		return;
1186 
1187 	gpa = memslot->base_gfn << PAGE_SHIFT;
1188 	spin_lock(&kvm->mmu_lock);
1189 	for (n = memslot->npages; n; --n) {
1190 		ptep = find_kvm_secondary_pte(kvm, gpa, &shift);
1191 		if (ptep && pte_present(*ptep))
1192 			kvmppc_unmap_pte(kvm, ptep, gpa, shift, memslot,
1193 					 kvm->arch.lpid);
1194 		gpa += PAGE_SIZE;
1195 	}
1196 	/*
1197 	 * Increase the mmu notifier sequence number to prevent any page
1198 	 * fault that read the memslot earlier from writing a PTE.
1199 	 */
1200 	kvm->mmu_invalidate_seq++;
1201 	spin_unlock(&kvm->mmu_lock);
1202 }
1203 
1204 static void add_rmmu_ap_encoding(struct kvm_ppc_rmmu_info *info,
1205 				 int psize, int *indexp)
1206 {
1207 	if (!mmu_psize_defs[psize].shift)
1208 		return;
1209 	info->ap_encodings[*indexp] = mmu_psize_defs[psize].shift |
1210 		(mmu_psize_defs[psize].ap << 29);
1211 	++(*indexp);
1212 }
1213 
1214 int kvmhv_get_rmmu_info(struct kvm *kvm, struct kvm_ppc_rmmu_info *info)
1215 {
1216 	int i;
1217 
1218 	if (!radix_enabled())
1219 		return -EINVAL;
1220 	memset(info, 0, sizeof(*info));
1221 
1222 	/* 4k page size */
1223 	info->geometries[0].page_shift = 12;
1224 	info->geometries[0].level_bits[0] = 9;
1225 	for (i = 1; i < 4; ++i)
1226 		info->geometries[0].level_bits[i] = p9_supported_radix_bits[i];
1227 	/* 64k page size */
1228 	info->geometries[1].page_shift = 16;
1229 	for (i = 0; i < 4; ++i)
1230 		info->geometries[1].level_bits[i] = p9_supported_radix_bits[i];
1231 
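	/* Report each supported page size with its "actual page" (AP) encoding */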
1232 	i = 0;
1233 	add_rmmu_ap_encoding(info, MMU_PAGE_4K, &i);
1234 	add_rmmu_ap_encoding(info, MMU_PAGE_64K, &i);
1235 	add_rmmu_ap_encoding(info, MMU_PAGE_2M, &i);
1236 	add_rmmu_ap_encoding(info, MMU_PAGE_1G, &i);
1237 
1238 	return 0;
1239 }
1240 
1241 int kvmppc_init_vm_radix(struct kvm *kvm)
1242 {
1243 	kvm->arch.pgtable = pgd_alloc(kvm->mm);
1244 	if (!kvm->arch.pgtable)
1245 		return -ENOMEM;
1246 	return 0;
1247 }
1248 
1249 static void pte_ctor(void *addr)
1250 {
1251 	memset(addr, 0, RADIX_PTE_TABLE_SIZE);
1252 }
1253 
1254 static void pmd_ctor(void *addr)
1255 {
1256 	memset(addr, 0, RADIX_PMD_TABLE_SIZE);
1257 }
1258 
1259 struct debugfs_radix_state {
1260 	struct kvm	*kvm;
1261 	struct mutex	mutex;
1262 	unsigned long	gpa;
1263 	int		lpid;
1264 	int		chars_left;
1265 	int		buf_index;
1266 	char		buf[128];
1267 	u8		hdr;
1268 };
1269 
1270 static int debugfs_radix_open(struct inode *inode, struct file *file)
1271 {
1272 	struct kvm *kvm = inode->i_private;
1273 	struct debugfs_radix_state *p;
1274 
1275 	p = kzalloc(sizeof(*p), GFP_KERNEL);
1276 	if (!p)
1277 		return -ENOMEM;
1278 
1279 	kvm_get_kvm(kvm);
1280 	p->kvm = kvm;
1281 	mutex_init(&p->mutex);
1282 	file->private_data = p;
1283 
1284 	return nonseekable_open(inode, file);
1285 }
1286 
1287 static int debugfs_radix_release(struct inode *inode, struct file *file)
1288 {
1289 	struct debugfs_radix_state *p = file->private_data;
1290 
1291 	kvm_put_kvm(p->kvm);
1292 	kfree(p);
1293 	return 0;
1294 }
1295 
1296 static ssize_t debugfs_radix_read(struct file *file, char __user *buf,
1297 				 size_t len, loff_t *ppos)
1298 {
1299 	struct debugfs_radix_state *p = file->private_data;
1300 	ssize_t ret, r;
1301 	unsigned long n;
1302 	struct kvm *kvm;
1303 	unsigned long gpa;
1304 	pgd_t *pgt;
1305 	struct kvm_nested_guest *nested;
1306 	pgd_t *pgdp;
1307 	p4d_t p4d, *p4dp;
1308 	pud_t pud, *pudp;
1309 	pmd_t pmd, *pmdp;
1310 	pte_t *ptep;
1311 	int shift;
1312 	unsigned long pte;
1313 
1314 	kvm = p->kvm;
1315 	if (!kvm_is_radix(kvm))
1316 		return 0;
1317 
1318 	ret = mutex_lock_interruptible(&p->mutex);
1319 	if (ret)
1320 		return ret;
1321 
1322 	if (p->chars_left) {
1323 		n = p->chars_left;
1324 		if (n > len)
1325 			n = len;
1326 		r = copy_to_user(buf, p->buf + p->buf_index, n);
1327 		n -= r;
1328 		p->chars_left -= n;
1329 		p->buf_index += n;
1330 		buf += n;
1331 		len -= n;
1332 		ret = n;
1333 		if (r) {
1334 			if (!n)
1335 				ret = -EFAULT;
1336 			goto out;
1337 		}
1338 	}
1339 
1340 	gpa = p->gpa;
1341 	nested = NULL;
1342 	pgt = NULL;
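	/*
	 * Walk the partition-scoped tree of this guest (lpid 0) and then of
	 * each nested guest, emitting a header and one line per valid leaf
	 * PTE.
	 */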
1343 	while (len != 0 && p->lpid >= 0) {
1344 		if (gpa >= RADIX_PGTABLE_RANGE) {
1345 			gpa = 0;
1346 			pgt = NULL;
1347 			if (nested) {
1348 				kvmhv_put_nested(nested);
1349 				nested = NULL;
1350 			}
1351 			p->lpid = kvmhv_nested_next_lpid(kvm, p->lpid);
1352 			p->hdr = 0;
1353 			if (p->lpid < 0)
1354 				break;
1355 		}
1356 		if (!pgt) {
1357 			if (p->lpid == 0) {
1358 				pgt = kvm->arch.pgtable;
1359 			} else {
1360 				nested = kvmhv_get_nested(kvm, p->lpid, false);
1361 				if (!nested) {
1362 					gpa = RADIX_PGTABLE_RANGE;
1363 					continue;
1364 				}
1365 				pgt = nested->shadow_pgtable;
1366 			}
1367 		}
1368 		n = 0;
1369 		if (!p->hdr) {
1370 			if (p->lpid > 0)
1371 				n = scnprintf(p->buf, sizeof(p->buf),
1372 					      "\nNested LPID %d: ", p->lpid);
1373 			n += scnprintf(p->buf + n, sizeof(p->buf) - n,
1374 				      "pgdir: %lx\n", (unsigned long)pgt);
1375 			p->hdr = 1;
1376 			goto copy;
1377 		}
1378 
1379 		pgdp = pgt + pgd_index(gpa);
1380 		p4dp = p4d_offset(pgdp, gpa);
1381 		p4d = READ_ONCE(*p4dp);
1382 		if (!(p4d_val(p4d) & _PAGE_PRESENT)) {
1383 			gpa = (gpa & P4D_MASK) + P4D_SIZE;
1384 			continue;
1385 		}
1386 
1387 		pudp = pud_offset(&p4d, gpa);
1388 		pud = READ_ONCE(*pudp);
1389 		if (!(pud_val(pud) & _PAGE_PRESENT)) {
1390 			gpa = (gpa & PUD_MASK) + PUD_SIZE;
1391 			continue;
1392 		}
1393 		if (pud_val(pud) & _PAGE_PTE) {
1394 			pte = pud_val(pud);
1395 			shift = PUD_SHIFT;
1396 			goto leaf;
1397 		}
1398 
1399 		pmdp = pmd_offset(&pud, gpa);
1400 		pmd = READ_ONCE(*pmdp);
1401 		if (!(pmd_val(pmd) & _PAGE_PRESENT)) {
1402 			gpa = (gpa & PMD_MASK) + PMD_SIZE;
1403 			continue;
1404 		}
1405 		if (pmd_val(pmd) & _PAGE_PTE) {
1406 			pte = pmd_val(pmd);
1407 			shift = PMD_SHIFT;
1408 			goto leaf;
1409 		}
1410 
1411 		ptep = pte_offset_kernel(&pmd, gpa);
1412 		pte = pte_val(READ_ONCE(*ptep));
1413 		if (!(pte & _PAGE_PRESENT)) {
1414 			gpa += PAGE_SIZE;
1415 			continue;
1416 		}
1417 		shift = PAGE_SHIFT;
1418 	leaf:
1419 		n = scnprintf(p->buf, sizeof(p->buf),
1420 			      " %lx: %lx %d\n", gpa, pte, shift);
1421 		gpa += 1ul << shift;
1422 	copy:
1423 		p->chars_left = n;
1424 		if (n > len)
1425 			n = len;
1426 		r = copy_to_user(buf, p->buf, n);
1427 		n -= r;
1428 		p->chars_left -= n;
1429 		p->buf_index = n;
1430 		buf += n;
1431 		len -= n;
1432 		ret += n;
1433 		if (r) {
1434 			if (!ret)
1435 				ret = -EFAULT;
1436 			break;
1437 		}
1438 	}
1439 	p->gpa = gpa;
1440 	if (nested)
1441 		kvmhv_put_nested(nested);
1442 
1443  out:
1444 	mutex_unlock(&p->mutex);
1445 	return ret;
1446 }
1447 
1448 static ssize_t debugfs_radix_write(struct file *file, const char __user *buf,
1449 			   size_t len, loff_t *ppos)
1450 {
1451 	return -EACCES;
1452 }
1453 
1454 static const struct file_operations debugfs_radix_fops = {
1455 	.owner	 = THIS_MODULE,
1456 	.open	 = debugfs_radix_open,
1457 	.release = debugfs_radix_release,
1458 	.read	 = debugfs_radix_read,
1459 	.write	 = debugfs_radix_write,
1460 	.llseek	 = generic_file_llseek,
1461 };
1462 
1463 void kvmhv_radix_debugfs_init(struct kvm *kvm)
1464 {
1465 	debugfs_create_file("radix", 0400, kvm->debugfs_dentry, kvm,
1466 			    &debugfs_radix_fops);
1467 }
1468 
1469 int kvmppc_radix_init(void)
1470 {
1471 	unsigned long size = sizeof(void *) << RADIX_PTE_INDEX_SIZE;
1472 
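	/*
	 * Both caches use align == size so that each page-table page is
	 * naturally aligned, as the radix hardware requires.
	 */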
1473 	kvm_pte_cache = kmem_cache_create("kvm-pte", size, size, 0, pte_ctor);
1474 	if (!kvm_pte_cache)
1475 		return -ENOMEM;
1476 
1477 	size = sizeof(void *) << RADIX_PMD_INDEX_SIZE;
1478 
1479 	kvm_pmd_cache = kmem_cache_create("kvm-pmd", size, size, 0, pmd_ctor);
1480 	if (!kvm_pmd_cache) {
1481 		kmem_cache_destroy(kvm_pte_cache);
1482 		return -ENOMEM;
1483 	}
1484 
1485 	return 0;
1486 }
1487 
1488 void kvmppc_radix_exit(void)
1489 {
1490 	kmem_cache_destroy(kvm_pte_cache);
1491 	kmem_cache_destroy(kvm_pmd_cache);
1492 }
1493