xref: /openbmc/linux/arch/riscv/kvm/mmu.c (revision 240e6d25)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Copyright (C) 2019 Western Digital Corporation or its affiliates.
4  *
5  * Authors:
6  *     Anup Patel <anup.patel@wdc.com>
7  */
8 
9 #include <linux/bitops.h>
10 #include <linux/errno.h>
11 #include <linux/err.h>
12 #include <linux/hugetlb.h>
13 #include <linux/module.h>
14 #include <linux/uaccess.h>
15 #include <linux/vmalloc.h>
16 #include <linux/kvm_host.h>
17 #include <linux/sched/signal.h>
18 #include <asm/csr.h>
19 #include <asm/page.h>
20 #include <asm/pgtable.h>
21 #include <asm/sbi.h>
22 
23 #ifdef CONFIG_64BIT
24 static unsigned long stage2_mode = (HGATP_MODE_SV39X4 << HGATP_MODE_SHIFT);
25 static unsigned long stage2_pgd_levels = 3;
26 #define stage2_index_bits	9
27 #else
28 static unsigned long stage2_mode = (HGATP_MODE_SV32X4 << HGATP_MODE_SHIFT);
29 static unsigned long stage2_pgd_levels = 2;
30 #define stage2_index_bits	10
31 #endif
32 
33 #define stage2_pgd_xbits	2
34 #define stage2_pgd_size	(1UL << (HGATP_PAGE_SHIFT + stage2_pgd_xbits))
35 #define stage2_gpa_bits	(HGATP_PAGE_SHIFT + \
36 			 (stage2_pgd_levels * stage2_index_bits) + \
37 			 stage2_pgd_xbits)
38 #define stage2_gpa_size	((gpa_t)(1ULL << stage2_gpa_bits))
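/*
 * With the parameters above the guest physical address space is
 * 2 + 3 * 9 + 12 = 41 bits (2 TiB) for Sv39x4 or 2 + 2 * 10 + 12 = 34
 * bits (16 GiB) for Sv32x4, and the root page table spans
 * 1UL << (12 + 2) = 16 KiB, i.e. four consecutive pages.
 */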
39 
40 #define stage2_pte_leaf(__ptep)	\
41 	(pte_val(*(__ptep)) & (_PAGE_READ | _PAGE_WRITE | _PAGE_EXEC))
42 
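/*
 * Return the index of the page table entry for 'addr' at the given
 * level. The root table is four times larger than an ordinary table
 * (stage2_pgd_xbits), so the top-level index uses two extra bits.
 */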
43 static inline unsigned long stage2_pte_index(gpa_t addr, u32 level)
44 {
45 	unsigned long mask;
46 	unsigned long shift = HGATP_PAGE_SHIFT + (stage2_index_bits * level);
47 
48 	if (level == (stage2_pgd_levels - 1))
49 		mask = (PTRS_PER_PTE * (1UL << stage2_pgd_xbits)) - 1;
50 	else
51 		mask = PTRS_PER_PTE - 1;
52 
53 	return (addr >> shift) & mask;
54 }
55 
56 static inline unsigned long stage2_pte_page_vaddr(pte_t pte)
57 {
58 	return (unsigned long)pfn_to_virt(pte_val(pte) >> _PAGE_PFN_SHIFT);
59 }
60 
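/*
 * The mapping size for a leaf at a given level is
 * 1UL << (12 + level * stage2_index_bits): 4 KiB at level 0,
 * 2 MiB/1 GiB at levels 1/2 for Sv39x4, and 4 MiB at level 1 for Sv32x4.
 */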
61 static int stage2_page_size_to_level(unsigned long page_size, u32 *out_level)
62 {
63 	u32 i;
64 	unsigned long psz = 1UL << 12;
65 
66 	for (i = 0; i < stage2_pgd_levels; i++) {
67 		if (page_size == (psz << (i * stage2_index_bits))) {
68 			*out_level = i;
69 			return 0;
70 		}
71 	}
72 
73 	return -EINVAL;
74 }
75 
76 static int stage2_level_to_page_size(u32 level, unsigned long *out_pgsize)
77 {
78 	if (stage2_pgd_levels < level)
79 		return -EINVAL;
80 
81 	*out_pgsize = 1UL << (12 + (level * stage2_index_bits));
82 
83 	return 0;
84 }
85 
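/*
 * The page cache holds pre-zeroed pages used to build page tables.
 * It is topped up with GFP_KERNEL allocations before kvm->mmu_lock is
 * taken so that stage2_cache_alloc() never needs to sleep while the
 * lock is held.
 */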
86 static int stage2_cache_topup(struct kvm_mmu_page_cache *pcache,
87 			      int min, int max)
88 {
89 	void *page;
90 
91 	BUG_ON(max > KVM_MMU_PAGE_CACHE_NR_OBJS);
92 	if (pcache->nobjs >= min)
93 		return 0;
94 	while (pcache->nobjs < max) {
95 		page = (void *)__get_free_page(GFP_KERNEL | __GFP_ZERO);
96 		if (!page)
97 			return -ENOMEM;
98 		pcache->objects[pcache->nobjs++] = page;
99 	}
100 
101 	return 0;
102 }
103 
104 static void stage2_cache_flush(struct kvm_mmu_page_cache *pcache)
105 {
106 	while (pcache && pcache->nobjs)
107 		free_page((unsigned long)pcache->objects[--pcache->nobjs]);
108 }
109 
110 static void *stage2_cache_alloc(struct kvm_mmu_page_cache *pcache)
111 {
112 	void *p;
113 
114 	if (!pcache)
115 		return NULL;
116 
117 	BUG_ON(!pcache->nobjs);
118 	p = pcache->objects[--pcache->nobjs];
119 
120 	return p;
121 }
122 
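/*
 * Walk the stage2 page table looking for a leaf entry that maps
 * 'addr'. On success *ptepp and *ptep_level describe the leaf PTE and
 * the level it was found at, and true is returned.
 */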
123 static bool stage2_get_leaf_entry(struct kvm *kvm, gpa_t addr,
124 				  pte_t **ptepp, u32 *ptep_level)
125 {
126 	pte_t *ptep;
127 	u32 current_level = stage2_pgd_levels - 1;
128 
129 	*ptep_level = current_level;
130 	ptep = (pte_t *)kvm->arch.pgd;
131 	ptep = &ptep[stage2_pte_index(addr, current_level)];
132 	while (ptep && pte_val(*ptep)) {
133 		if (stage2_pte_leaf(ptep)) {
134 			*ptep_level = current_level;
135 			*ptepp = ptep;
136 			return true;
137 		}
138 
139 		if (current_level) {
140 			current_level--;
141 			*ptep_level = current_level;
142 			ptep = (pte_t *)stage2_pte_page_vaddr(*ptep);
143 			ptep = &ptep[stage2_pte_index(addr, current_level)];
144 		} else {
145 			ptep = NULL;
146 		}
147 	}
148 
149 	return false;
150 }
151 
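/*
 * Flush the guest TLB entry covering 'addr' at the given level on all
 * online harts using a remote HFENCE.GVMA scoped to this VM's VMID.
 */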
152 static void stage2_remote_tlb_flush(struct kvm *kvm, u32 level, gpa_t addr)
153 {
154 	struct cpumask hmask;
155 	unsigned long size = PAGE_SIZE;
156 	struct kvm_vmid *vmid = &kvm->arch.vmid;
157 
158 	if (stage2_level_to_page_size(level, &size))
159 		return;
160 	addr &= ~(size - 1);
161 
162 	/*
163 	 * TODO: Instead of cpu_online_mask, we should only target CPUs
164 	 * where the Guest/VM is running.
165 	 */
166 	preempt_disable();
167 	riscv_cpuid_to_hartid_mask(cpu_online_mask, &hmask);
168 	sbi_remote_hfence_gvma_vmid(cpumask_bits(&hmask), addr, size,
169 				    READ_ONCE(vmid->vmid));
170 	preempt_enable();
171 }
172 
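/*
 * Install *new_pte as a leaf at 'level', allocating any missing
 * intermediate page tables from 'pcache'. Fails with -EEXIST if a
 * larger leaf already covers the address and with -ENOMEM if the
 * cache is empty.
 */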
173 static int stage2_set_pte(struct kvm *kvm, u32 level,
174 			   struct kvm_mmu_page_cache *pcache,
175 			   gpa_t addr, const pte_t *new_pte)
176 {
177 	u32 current_level = stage2_pgd_levels - 1;
178 	pte_t *next_ptep = (pte_t *)kvm->arch.pgd;
179 	pte_t *ptep = &next_ptep[stage2_pte_index(addr, current_level)];
180 
181 	if (current_level < level)
182 		return -EINVAL;
183 
184 	while (current_level != level) {
185 		if (stage2_pte_leaf(ptep))
186 			return -EEXIST;
187 
188 		if (!pte_val(*ptep)) {
189 			next_ptep = stage2_cache_alloc(pcache);
190 			if (!next_ptep)
191 				return -ENOMEM;
192 			*ptep = pfn_pte(PFN_DOWN(__pa(next_ptep)),
193 					__pgprot(_PAGE_TABLE));
194 		} else {
195 			if (stage2_pte_leaf(ptep))
196 				return -EEXIST;
197 			next_ptep = (pte_t *)stage2_pte_page_vaddr(*ptep);
198 		}
199 
200 		current_level--;
201 		ptep = &next_ptep[stage2_pte_index(addr, current_level)];
202 	}
203 
204 	*ptep = *new_pte;
205 	if (stage2_pte_leaf(ptep))
206 		stage2_remote_tlb_flush(kvm, current_level, addr);
207 
208 	return 0;
209 }
210 
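/*
 * Create a stage2 mapping of size 'page_size' from guest physical
 * address 'gpa' to host physical address 'hpa' with the requested
 * access permissions.
 */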
211 static int stage2_map_page(struct kvm *kvm,
212 			   struct kvm_mmu_page_cache *pcache,
213 			   gpa_t gpa, phys_addr_t hpa,
214 			   unsigned long page_size,
215 			   bool page_rdonly, bool page_exec)
216 {
217 	int ret;
218 	u32 level = 0;
219 	pte_t new_pte;
220 	pgprot_t prot;
221 
222 	ret = stage2_page_size_to_level(page_size, &level);
223 	if (ret)
224 		return ret;
225 
226 	/*
227 	 * A RISC-V implementation can choose to either:
228 	 * 1) Update 'A' and 'D' PTE bits in hardware
229 	 * 2) Generate page fault when 'A' and/or 'D' bits are not set
230 	 * 2) Generate page fault when 'A' and/or 'D' bits are not set in
231 	 *    the PTE so that software can update these bits.
232 	 * We support both options mentioned above. To achieve this, we
233 	 * always set 'A' and 'D' PTE bits at time of creating stage2
234 	 * mapping. To support KVM dirty page logging with both options
235 	 * mentioned above, we will write-protect stage2 PTEs to track
236 	 * dirty pages.
237 	 */
238 
239 	if (page_exec) {
240 		if (page_rdonly)
241 			prot = PAGE_READ_EXEC;
242 		else
243 			prot = PAGE_WRITE_EXEC;
244 	} else {
245 		if (page_rdonly)
246 			prot = PAGE_READ;
247 		else
248 			prot = PAGE_WRITE;
249 	}
250 	new_pte = pfn_pte(PFN_DOWN(hpa), prot);
251 	new_pte = pte_mkdirty(new_pte);
252 
253 	return stage2_set_pte(kvm, level, pcache, gpa, &new_pte);
254 }
255 
256 enum stage2_op {
257 	STAGE2_OP_NOP = 0,	/* Nothing */
258 	STAGE2_OP_CLEAR,	/* Clear/Unmap */
259 	STAGE2_OP_WP,		/* Write-protect */
260 };
261 
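/*
 * Apply 'op' to the PTE at 'ptep' (level 'ptep_level'): clear it,
 * write-protect it, or do nothing. Non-leaf entries are handled by
 * recursing into the next-level table, and TLB entries for affected
 * leaf mappings are flushed.
 */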
262 static void stage2_op_pte(struct kvm *kvm, gpa_t addr,
263 			  pte_t *ptep, u32 ptep_level, enum stage2_op op)
264 {
265 	int i, ret;
266 	pte_t *next_ptep;
267 	u32 next_ptep_level;
268 	unsigned long next_page_size, page_size;
269 
270 	ret = stage2_level_to_page_size(ptep_level, &page_size);
271 	if (ret)
272 		return;
273 
274 	BUG_ON(addr & (page_size - 1));
275 
276 	if (!pte_val(*ptep))
277 		return;
278 
279 	if (ptep_level && !stage2_pte_leaf(ptep)) {
280 		next_ptep = (pte_t *)stage2_pte_page_vaddr(*ptep);
281 		next_ptep_level = ptep_level - 1;
282 		ret = stage2_level_to_page_size(next_ptep_level,
283 						&next_page_size);
284 		if (ret)
285 			return;
286 
287 		if (op == STAGE2_OP_CLEAR)
288 			set_pte(ptep, __pte(0));
289 		for (i = 0; i < PTRS_PER_PTE; i++)
290 			stage2_op_pte(kvm, addr + i * next_page_size,
291 					&next_ptep[i], next_ptep_level, op);
292 		if (op == STAGE2_OP_CLEAR)
293 			put_page(virt_to_page(next_ptep));
294 	} else {
295 		if (op == STAGE2_OP_CLEAR)
296 			set_pte(ptep, __pte(0));
297 		else if (op == STAGE2_OP_WP)
298 			set_pte(ptep, __pte(pte_val(*ptep) & ~_PAGE_WRITE));
299 		stage2_remote_tlb_flush(kvm, ptep_level, addr);
300 	}
301 }
302 
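/*
 * Unmap all stage2 mappings in the guest physical range
 * [start, start + size). When 'may_block' is set, kvm->mmu_lock may be
 * dropped between iterations to avoid starvation and lockup detector
 * warnings.
 */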
303 static void stage2_unmap_range(struct kvm *kvm, gpa_t start,
304 			       gpa_t size, bool may_block)
305 {
306 	int ret;
307 	pte_t *ptep;
308 	u32 ptep_level;
309 	bool found_leaf;
310 	unsigned long page_size;
311 	gpa_t addr = start, end = start + size;
312 
313 	while (addr < end) {
314 		found_leaf = stage2_get_leaf_entry(kvm, addr,
315 						   &ptep, &ptep_level);
316 		ret = stage2_level_to_page_size(ptep_level, &page_size);
317 		if (ret)
318 			break;
319 
320 		if (!found_leaf)
321 			goto next;
322 
323 		if (!(addr & (page_size - 1)) && ((end - addr) >= page_size))
324 			stage2_op_pte(kvm, addr, ptep,
325 				      ptep_level, STAGE2_OP_CLEAR);
326 
327 next:
328 		addr += page_size;
329 
330 		/*
331 		 * If the range is too large, release the kvm->mmu_lock
332 		 * to prevent starvation and lockup detector warnings.
333 		 */
334 		if (may_block && addr < end)
335 			cond_resched_lock(&kvm->mmu_lock);
336 	}
337 }
338 
339 static void stage2_wp_range(struct kvm *kvm, gpa_t start, gpa_t end)
340 {
341 	int ret;
342 	pte_t *ptep;
343 	u32 ptep_level;
344 	bool found_leaf;
345 	gpa_t addr = start;
346 	unsigned long page_size;
347 
348 	while (addr < end) {
349 		found_leaf = stage2_get_leaf_entry(kvm, addr,
350 						   &ptep, &ptep_level);
351 		ret = stage2_level_to_page_size(ptep_level, &page_size);
352 		if (ret)
353 			break;
354 
355 		if (!found_leaf)
356 			goto next;
357 
358 		if (!(addr & (page_size - 1)) && ((end - addr) >= page_size))
359 			stage2_op_pte(kvm, addr, ptep,
360 				      ptep_level, STAGE2_OP_WP);
361 
362 next:
363 		addr += page_size;
364 	}
365 }
366 
367 static void stage2_wp_memory_region(struct kvm *kvm, int slot)
368 {
369 	struct kvm_memslots *slots = kvm_memslots(kvm);
370 	struct kvm_memory_slot *memslot = id_to_memslot(slots, slot);
371 	phys_addr_t start = memslot->base_gfn << PAGE_SHIFT;
372 	phys_addr_t end = (memslot->base_gfn + memslot->npages) << PAGE_SHIFT;
373 
374 	spin_lock(&kvm->mmu_lock);
375 	stage2_wp_range(kvm, start, end);
376 	spin_unlock(&kvm->mmu_lock);
377 	kvm_flush_remote_tlbs(kvm);
378 }
379 
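/*
 * Map a host MMIO region into the guest one 4 KiB page at a time,
 * optionally write-protected. Called from
 * kvm_arch_prepare_memory_region() for VM_PFNMAP VMAs.
 */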
380 static int stage2_ioremap(struct kvm *kvm, gpa_t gpa, phys_addr_t hpa,
381 			  unsigned long size, bool writable)
382 {
383 	pte_t pte;
384 	int ret = 0;
385 	unsigned long pfn;
386 	phys_addr_t addr, end;
387 	struct kvm_mmu_page_cache pcache = { 0, };
388 
389 	end = (gpa + size + PAGE_SIZE - 1) & PAGE_MASK;
390 	pfn = __phys_to_pfn(hpa);
391 
392 	for (addr = gpa; addr < end; addr += PAGE_SIZE) {
393 		pte = pfn_pte(pfn, PAGE_KERNEL);
394 
395 		if (!writable)
396 			pte = pte_wrprotect(pte);
397 
398 		ret = stage2_cache_topup(&pcache,
399 					 stage2_pgd_levels,
400 					 KVM_MMU_PAGE_CACHE_NR_OBJS);
401 		if (ret)
402 			goto out;
403 
404 		spin_lock(&kvm->mmu_lock);
405 		ret = stage2_set_pte(kvm, 0, &pcache, addr, &pte);
406 		spin_unlock(&kvm->mmu_lock);
407 		if (ret)
408 			goto out;
409 
410 		pfn++;
411 	}
412 
413 out:
414 	stage2_cache_flush(&pcache);
415 	return ret;
416 }
417 
418 void kvm_arch_mmu_enable_log_dirty_pt_masked(struct kvm *kvm,
419 					     struct kvm_memory_slot *slot,
420 					     gfn_t gfn_offset,
421 					     unsigned long mask)
422 {
423 	phys_addr_t base_gfn = slot->base_gfn + gfn_offset;
424 	phys_addr_t start = (base_gfn +  __ffs(mask)) << PAGE_SHIFT;
425 	phys_addr_t end = (base_gfn + __fls(mask) + 1) << PAGE_SHIFT;
426 
427 	stage2_wp_range(kvm, start, end);
428 }
429 
430 void kvm_arch_sync_dirty_log(struct kvm *kvm, struct kvm_memory_slot *memslot)
431 {
432 }
433 
434 void kvm_arch_flush_remote_tlbs_memslot(struct kvm *kvm,
435 					const struct kvm_memory_slot *memslot)
436 {
437 	kvm_flush_remote_tlbs(kvm);
438 }
439 
440 void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free)
441 {
442 }
443 
444 void kvm_arch_memslots_updated(struct kvm *kvm, u64 gen)
445 {
446 }
447 
448 void kvm_arch_flush_shadow_all(struct kvm *kvm)
449 {
450 	kvm_riscv_stage2_free_pgd(kvm);
451 }
452 
453 void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
454 				   struct kvm_memory_slot *slot)
455 {
456 }
457 
458 void kvm_arch_commit_memory_region(struct kvm *kvm,
459 				const struct kvm_userspace_memory_region *mem,
460 				struct kvm_memory_slot *old,
461 				const struct kvm_memory_slot *new,
462 				enum kvm_mr_change change)
463 {
464 	/*
465 	 * At this point the memslot has been committed and there is an
466 	 * allocated dirty_bitmap[]; dirty pages will be tracked while
467 	 * the memory slot is write protected.
468 	 */
469 	if (change != KVM_MR_DELETE && mem->flags & KVM_MEM_LOG_DIRTY_PAGES)
470 		stage2_wp_memory_region(kvm, mem->slot);
471 }
472 
473 int kvm_arch_prepare_memory_region(struct kvm *kvm,
474 				struct kvm_memory_slot *memslot,
475 				const struct kvm_userspace_memory_region *mem,
476 				enum kvm_mr_change change)
477 {
478 	hva_t hva = mem->userspace_addr;
479 	hva_t reg_end = hva + mem->memory_size;
480 	bool writable = !(mem->flags & KVM_MEM_READONLY);
481 	int ret = 0;
482 
483 	if (change != KVM_MR_CREATE && change != KVM_MR_MOVE &&
484 			change != KVM_MR_FLAGS_ONLY)
485 		return 0;
486 
487 	/*
488 	 * Prevent userspace from creating a memory region outside of the
489 	 * GPA space addressable by the KVM guest.
490 	 */
491 	if ((memslot->base_gfn + memslot->npages) >=
492 	    (stage2_gpa_size >> PAGE_SHIFT))
493 		return -EFAULT;
494 
495 	mmap_read_lock(current->mm);
496 
497 	/*
498 	 * A memory region could potentially cover multiple VMAs, and
499 	 * any holes between them, so iterate over all of them to find
500 	 * out if we can map any of them right now.
501 	 *
502 	 *     +--------------------------------------------+
503 	 * +---------------+----------------+   +----------------+
504 	 * |   : VMA 1     |      VMA 2     |   |    VMA 3  :    |
505 	 * +---------------+----------------+   +----------------+
506 	 *     |               memory region                |
507 	 *     +--------------------------------------------+
508 	 */
509 	do {
510 		struct vm_area_struct *vma = find_vma(current->mm, hva);
511 		hva_t vm_start, vm_end;
512 
513 		if (!vma || vma->vm_start >= reg_end)
514 			break;
515 
516 		/*
517 		 * Mapping a read-only VMA is only allowed if the
518 		 * memory region is configured as read-only.
519 		 */
520 		if (writable && !(vma->vm_flags & VM_WRITE)) {
521 			ret = -EPERM;
522 			break;
523 		}
524 
525 		/* Take the intersection of this VMA with the memory region */
526 		vm_start = max(hva, vma->vm_start);
527 		vm_end = min(reg_end, vma->vm_end);
528 
529 		if (vma->vm_flags & VM_PFNMAP) {
530 			gpa_t gpa = mem->guest_phys_addr +
531 				    (vm_start - mem->userspace_addr);
532 			phys_addr_t pa;
533 
534 			pa = (phys_addr_t)vma->vm_pgoff << PAGE_SHIFT;
535 			pa += vm_start - vma->vm_start;
536 
537 			/* IO region dirty page logging not allowed */
538 			if (memslot->flags & KVM_MEM_LOG_DIRTY_PAGES) {
539 				ret = -EINVAL;
540 				goto out;
541 			}
542 
543 			ret = stage2_ioremap(kvm, gpa, pa,
544 					     vm_end - vm_start, writable);
545 			if (ret)
546 				break;
547 		}
548 		hva = vm_end;
549 	} while (hva < reg_end);
550 
551 	if (change == KVM_MR_FLAGS_ONLY)
552 		goto out;
553 
554 	spin_lock(&kvm->mmu_lock);
555 	if (ret)
556 		stage2_unmap_range(kvm, mem->guest_phys_addr,
557 				   mem->memory_size, false);
558 	spin_unlock(&kvm->mmu_lock);
559 
560 out:
561 	mmap_read_unlock(current->mm);
562 	return ret;
563 }
564 
565 bool kvm_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range)
566 {
567 	if (!kvm->arch.pgd)
568 		return false;
569 
570 	stage2_unmap_range(kvm, range->start << PAGE_SHIFT,
571 			   (range->end - range->start) << PAGE_SHIFT,
572 			   range->may_block);
573 	return false;
574 }
575 
576 bool kvm_set_spte_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
577 {
578 	int ret;
579 	kvm_pfn_t pfn = pte_pfn(range->pte);
580 
581 	if (!kvm->arch.pgd)
582 		return false;
583 
584 	WARN_ON(range->end - range->start != 1);
585 
586 	ret = stage2_map_page(kvm, NULL, range->start << PAGE_SHIFT,
587 			      __pfn_to_phys(pfn), PAGE_SIZE, true, true);
588 	if (ret) {
589 		kvm_debug("Failed to map stage2 page (error %d)\n", ret);
590 		return true;
591 	}
592 
593 	return false;
594 }
595 
596 bool kvm_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
597 {
598 	pte_t *ptep;
599 	u32 ptep_level = 0;
600 	u64 size = (range->end - range->start) << PAGE_SHIFT;
601 
602 	if (!kvm->arch.pgd)
603 		return false;
604 
605 	WARN_ON(size != PAGE_SIZE && size != PMD_SIZE && size != PGDIR_SIZE);
606 
607 	if (!stage2_get_leaf_entry(kvm, range->start << PAGE_SHIFT,
608 				   &ptep, &ptep_level))
609 		return false;
610 
611 	return ptep_test_and_clear_young(NULL, 0, ptep);
612 }
613 
614 bool kvm_test_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
615 {
616 	pte_t *ptep;
617 	u32 ptep_level = 0;
618 	u64 size = (range->end - range->start) << PAGE_SHIFT;
619 
620 	if (!kvm->arch.pgd)
621 		return false;
622 
623 	WARN_ON(size != PAGE_SIZE && size != PMD_SIZE && size != PGDIR_SIZE);
624 
625 	if (!stage2_get_leaf_entry(kvm, range->start << PAGE_SHIFT,
626 				   &ptep, &ptep_level))
627 		return false;
628 
629 	return pte_young(*ptep);
630 }
631 
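/*
 * Handle a stage2 (guest) page fault: translate the faulting hva to a
 * host pfn via gfn_to_pfn_prot(), pick the mapping size from the
 * backing VMA, and install the mapping with stage2_map_page(). When
 * dirty logging is enabled the mapping is forced to 4 KiB pages and
 * only write faults get a writable mapping.
 */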
632 int kvm_riscv_stage2_map(struct kvm_vcpu *vcpu,
633 			 struct kvm_memory_slot *memslot,
634 			 gpa_t gpa, unsigned long hva, bool is_write)
635 {
636 	int ret;
637 	kvm_pfn_t hfn;
638 	bool writeable;
639 	short vma_pageshift;
640 	gfn_t gfn = gpa >> PAGE_SHIFT;
641 	struct vm_area_struct *vma;
642 	struct kvm *kvm = vcpu->kvm;
643 	struct kvm_mmu_page_cache *pcache = &vcpu->arch.mmu_page_cache;
644 	bool logging = (memslot->dirty_bitmap &&
645 			!(memslot->flags & KVM_MEM_READONLY)) ? true : false;
646 	unsigned long vma_pagesize, mmu_seq;
647 
648 	mmap_read_lock(current->mm);
649 
650 	vma = find_vma_intersection(current->mm, hva, hva + 1);
651 	if (unlikely(!vma)) {
652 		kvm_err("Failed to find VMA for hva 0x%lx\n", hva);
653 		mmap_read_unlock(current->mm);
654 		return -EFAULT;
655 	}
656 
657 	if (is_vm_hugetlb_page(vma))
658 		vma_pageshift = huge_page_shift(hstate_vma(vma));
659 	else
660 		vma_pageshift = PAGE_SHIFT;
661 	vma_pagesize = 1ULL << vma_pageshift;
662 	if (logging || (vma->vm_flags & VM_PFNMAP))
663 		vma_pagesize = PAGE_SIZE;
664 
665 	if (vma_pagesize == PMD_SIZE || vma_pagesize == PGDIR_SIZE)
666 		gfn = (gpa & huge_page_mask(hstate_vma(vma))) >> PAGE_SHIFT;
667 
668 	mmap_read_unlock(current->mm);
669 
670 	if (vma_pagesize != PGDIR_SIZE &&
671 	    vma_pagesize != PMD_SIZE &&
672 	    vma_pagesize != PAGE_SIZE) {
673 		kvm_err("Invalid VMA page size 0x%lx\n", vma_pagesize);
674 		return -EFAULT;
675 	}
676 
677 	/* Top up the cache so that all page table levels can be allocated */
678 	ret = stage2_cache_topup(pcache, stage2_pgd_levels,
679 				 KVM_MMU_PAGE_CACHE_NR_OBJS);
680 	if (ret) {
681 		kvm_err("Failed to topup stage2 cache\n");
682 		return ret;
683 	}
684 
685 	mmu_seq = kvm->mmu_notifier_seq;
686 
687 	hfn = gfn_to_pfn_prot(kvm, gfn, is_write, &writeable);
688 	if (hfn == KVM_PFN_ERR_HWPOISON) {
689 		send_sig_mceerr(BUS_MCEERR_AR, (void __user *)hva,
690 				vma_pageshift, current);
691 		return 0;
692 	}
693 	if (is_error_noslot_pfn(hfn))
694 		return -EFAULT;
695 
696 	/*
697 	 * If logging is active then we allow writable pages only
698 	 * for write faults.
699 	 */
700 	if (logging && !is_write)
701 		writeable = false;
702 
703 	spin_lock(&kvm->mmu_lock);
704 
705 	if (mmu_notifier_retry(kvm, mmu_seq))
706 		goto out_unlock;
707 
708 	if (writeable) {
709 		kvm_set_pfn_dirty(hfn);
710 		mark_page_dirty(kvm, gfn);
711 		ret = stage2_map_page(kvm, pcache, gpa, hfn << PAGE_SHIFT,
712 				      vma_pagesize, false, true);
713 	} else {
714 		ret = stage2_map_page(kvm, pcache, gpa, hfn << PAGE_SHIFT,
715 				      vma_pagesize, true, true);
716 	}
717 
718 	if (ret)
719 		kvm_err("Failed to map in stage2\n");
720 
721 out_unlock:
722 	spin_unlock(&kvm->mmu_lock);
723 	kvm_set_pfn_accessed(hfn);
724 	kvm_release_pfn_clean(hfn);
725 	return ret;
726 }
727 
728 void kvm_riscv_stage2_flush_cache(struct kvm_vcpu *vcpu)
729 {
730 	stage2_cache_flush(&vcpu->arch.mmu_page_cache);
731 }
732 
733 int kvm_riscv_stage2_alloc_pgd(struct kvm *kvm)
734 {
735 	struct page *pgd_page;
736 
737 	if (kvm->arch.pgd != NULL) {
738 		kvm_err("kvm_arch already initialized?\n");
739 		return -EINVAL;
740 	}
741 
742 	pgd_page = alloc_pages(GFP_KERNEL | __GFP_ZERO,
743 				get_order(stage2_pgd_size));
744 	if (!pgd_page)
745 		return -ENOMEM;
746 	kvm->arch.pgd = page_to_virt(pgd_page);
747 	kvm->arch.pgd_phys = page_to_phys(pgd_page);
748 
749 	return 0;
750 }
751 
752 void kvm_riscv_stage2_free_pgd(struct kvm *kvm)
753 {
754 	void *pgd = NULL;
755 
756 	spin_lock(&kvm->mmu_lock);
757 	if (kvm->arch.pgd) {
758 		stage2_unmap_range(kvm, 0UL, stage2_gpa_size, false);
759 		pgd = READ_ONCE(kvm->arch.pgd);
760 		kvm->arch.pgd = NULL;
761 		kvm->arch.pgd_phys = 0;
762 	}
763 	spin_unlock(&kvm->mmu_lock);
764 
765 	if (pgd)
766 		free_pages((unsigned long)pgd, get_order(stage2_pgd_size));
767 }
768 
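/*
 * Program the hgatp CSR for this VCPU with the stage2 mode, the VM's
 * current VMID and the root page table PPN. If the implementation has
 * no usable VMID bits, flush the guest TLB since mappings cannot be
 * distinguished by VMID.
 */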
769 void kvm_riscv_stage2_update_hgatp(struct kvm_vcpu *vcpu)
770 {
771 	unsigned long hgatp = stage2_mode;
772 	struct kvm_arch *k = &vcpu->kvm->arch;
773 
774 	hgatp |= (READ_ONCE(k->vmid.vmid) << HGATP_VMID_SHIFT) &
775 		 HGATP_VMID_MASK;
776 	hgatp |= (k->pgd_phys >> PAGE_SHIFT) & HGATP_PPN;
777 
778 	csr_write(CSR_HGATP, hgatp);
779 
780 	if (!kvm_riscv_stage2_vmid_bits())
781 		__kvm_riscv_hfence_gvma_all();
782 }
783 
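/*
 * Probe the widest supported stage2 mode by writing it to hgatp and
 * reading the CSR back: on RV64 prefer Sv48x4 (4 levels) and fall back
 * to the default Sv39x4 (3 levels) when the write does not stick.
 */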
784 void kvm_riscv_stage2_mode_detect(void)
785 {
786 #ifdef CONFIG_64BIT
787 	/* Try Sv48x4 stage2 mode */
788 	csr_write(CSR_HGATP, HGATP_MODE_SV48X4 << HGATP_MODE_SHIFT);
789 	if ((csr_read(CSR_HGATP) >> HGATP_MODE_SHIFT) == HGATP_MODE_SV48X4) {
790 		stage2_mode = (HGATP_MODE_SV48X4 << HGATP_MODE_SHIFT);
791 		stage2_pgd_levels = 4;
792 	}
793 	csr_write(CSR_HGATP, 0);
794 
795 	__kvm_riscv_hfence_gvma_all();
796 #endif
797 }
798 
799 unsigned long kvm_riscv_stage2_mode(void)
800 {
801 	return stage2_mode >> HGATP_MODE_SHIFT;
802 }
803