Lines matching "gpa" (RISC-V KVM G-stage MMU)

1 // SPDX-License-Identifier: GPL-2.0
47 if (level == (gstage_pgd_levels - 1)) in gstage_pte_index()
48 mask = (PTRS_PER_PTE * (1UL << gstage_pgd_xbits)) - 1; in gstage_pte_index()
50 mask = PTRS_PER_PTE - 1; in gstage_pte_index()
65 for (i = 0; i < gstage_pgd_levels; i++) { in gstage_page_size_to_level()
68 return 0; in gstage_page_size_to_level()
72 return -EINVAL; in gstage_page_size_to_level()
78 return -EINVAL; in gstage_level_to_page_order()
81 return 0; in gstage_level_to_page_order()
94 return 0; in gstage_level_to_page_size()
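
A worked example for the common RV64 configuration (Sv39x4: gstage_pgd_levels == 3, 4 KiB base pages, 512 eight-byte PTEs per table, with the root level 2 bits wider as file lines 47-48 above show) ties these level/order/size helpers together:

	/*
	 *   level 0 -> page order 12 -> 4 KiB leaf
	 *   level 1 -> page order 21 -> 2 MiB leaf
	 *   level 2 -> page order 30 -> 1 GiB leaf
	 *
	 * i.e. order(level) = PAGE_SHIFT + level * (PAGE_SHIFT - 3), since each
	 * level resolves 9 bits, and gstage_level_to_page_size() is then
	 * effectively BIT(order(level)).
	 */
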
101 u32 current_level = gstage_pgd_levels - 1; in gstage_get_leaf_entry()
104 ptep = (pte_t *)kvm->arch.pgd; in gstage_get_leaf_entry()
114 current_level--; in gstage_get_leaf_entry()
132 addr &= ~(BIT(order) - 1); in gstage_remote_tlb_flush()
134 kvm_riscv_hfence_gvma_vmid_gpa(kvm, -1UL, 0, addr, BIT(order), order); in gstage_remote_tlb_flush()
141 u32 current_level = gstage_pgd_levels - 1; in gstage_set_pte()
142 pte_t *next_ptep = (pte_t *)kvm->arch.pgd; in gstage_set_pte()
146 return -EINVAL; in gstage_set_pte()
150 return -EEXIST; in gstage_set_pte()
154 return -ENOMEM; in gstage_set_pte()
157 return -ENOMEM; in gstage_set_pte()
162 return -EEXIST; in gstage_set_pte()
166 current_level--; in gstage_set_pte()
174 return 0; in gstage_set_pte()
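
The -ENOMEM returns above (file lines 154 and 157) are taken when the walk needs a new intermediate table but the caller's kvm_mmu_memory_cache cannot provide one. A minimal sketch of that step, assuming the generic kvm_mmu_memory_cache_alloc() helper and the riscv pfn_pte()/_PAGE_TABLE definitions (the exact bookkeeping in gstage_set_pte() may differ):

	/* One step of the downward walk: link in a missing next-level table. */
	if (!pte_val(ptep_get(ptep))) {
		if (!pcache)
			return -ENOMEM;		/* no cache supplied by the caller */
		next_ptep = kvm_mmu_memory_cache_alloc(pcache);
		if (!next_ptep)
			return -ENOMEM;		/* cache exhausted */
		/* Non-leaf PTE: valid bit only, pointing at the new table. */
		set_pte(ptep, pfn_pte(PFN_DOWN(__pa(next_ptep)),
				      __pgprot(_PAGE_TABLE)));
	}
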
179 gpa_t gpa, phys_addr_t hpa, in gstage_map_page() argument
184 u32 level = 0; in gstage_map_page()
193 * A RISC-V implementation can choose to either: in gstage_map_page()
199 * always set 'A' and 'D' PTE bits at time of creating G-stage in gstage_map_page()
201 * mentioned above, we will write-protect G-stage PTEs to track in gstage_map_page()
219 return gstage_set_pte(kvm, level, pcache, gpa, &new_pte); in gstage_map_page()
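
The comment fragments above (file lines 193-201) summarize the dirty-tracking strategy: the 'A' and 'D' bits are set up front when the G-stage mapping is created, and dirty logging is implemented by write-protecting PTEs rather than relying on hardware 'D' updates. A minimal sketch of how such a leaf PTE could be composed before the gstage_set_pte() call, using the standard riscv helpers pfn_pte(), pte_mkyoung() and pte_mkdirty() (the precise construction inside gstage_map_page() may differ):

	static pte_t example_gstage_leaf_pte(phys_addr_t hpa, bool writable)
	{
		pgprot_t prot = writable ? PAGE_WRITE_EXEC : PAGE_READ_EXEC;
		pte_t pte = pfn_pte(PFN_DOWN(hpa), prot);

		/* Pre-set Accessed/Dirty so neither hardware-managed nor
		 * fault-based A/D implementations take extra faults. */
		return pte_mkdirty(pte_mkyoung(pte));
	}
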
223 GSTAGE_OP_NOP = 0, /* Nothing */
225 GSTAGE_OP_WP, /* Write-protect */
240 BUG_ON(addr & (page_size - 1)); in gstage_op_pte()
247 next_ptep_level = ptep_level - 1; in gstage_op_pte()
254 set_pte(ptep, __pte(0)); in gstage_op_pte()
255 for (i = 0; i < PTRS_PER_PTE; i++) in gstage_op_pte()
262 set_pte(ptep, __pte(0)); in gstage_op_pte()
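
For the GSTAGE_OP_WP case the work on a leaf PTE amounts to dropping the write permission; a hedged sketch, assuming the riscv _PAGE_WRITE bit and the generic ptep_get() accessor (gstage_op_pte() additionally issues the remote TLB flush shown at file line 134 afterwards):

	/* Write-protect the leaf in place; the guest's next store to this page
	 * faults into KVM and can then be recorded in the dirty bitmap. */
	set_pte(ptep, __pte(pte_val(ptep_get(ptep)) & ~_PAGE_WRITE));
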
289 if (!(addr & (page_size - 1)) && ((end - addr) >= page_size)) in gstage_unmap_range()
297 * If the range is too large, release the kvm->mmu_lock in gstage_unmap_range()
301 cond_resched_lock(&kvm->mmu_lock); in gstage_unmap_range()
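
The fragment above is the reschedule point inside the unmap walk; the surrounding pattern is roughly the following, guarded by the caller's may_block flag (a sketch, not the exact upstream control flow):

	/* On large ranges, briefly drop kvm->mmu_lock between iterations so
	 * other vCPUs and MMU notifiers are not starved and the lockup
	 * detector stays quiet; only legal when the caller may block. */
	if (may_block && addr < end)
		cond_resched_lock(&kvm->mmu_lock);
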
324 if (!(addr & (page_size - 1)) && ((end - addr) >= page_size)) in gstage_wp_range()
337 phys_addr_t start = memslot->base_gfn << PAGE_SHIFT; in gstage_wp_memory_region()
338 phys_addr_t end = (memslot->base_gfn + memslot->npages) << PAGE_SHIFT; in gstage_wp_memory_region()
340 spin_lock(&kvm->mmu_lock); in gstage_wp_memory_region()
342 spin_unlock(&kvm->mmu_lock); in gstage_wp_memory_region()
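
gstage_wp_memory_region() is the slot-wide write-protect used when dirty logging is turned on; between the lock/unlock shown above it walks the slot with gstage_wp_range() (file line 324 onwards), and the remote TLBs must be flushed afterwards so vCPUs really refault on their next write. A sketch of the overall sequence, assuming the generic kvm_flush_remote_tlbs() helper:

	spin_lock(&kvm->mmu_lock);
	gstage_wp_range(kvm, start, end);	/* clear the write bit on leaf PTEs */
	spin_unlock(&kvm->mmu_lock);
	kvm_flush_remote_tlbs(kvm);		/* make every vCPU refault on write */
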
346 int kvm_riscv_gstage_ioremap(struct kvm *kvm, gpa_t gpa, in kvm_riscv_gstage_ioremap() argument
351 int ret = 0; in kvm_riscv_gstage_ioremap()
355 .gfp_custom = (in_atomic) ? GFP_ATOMIC | __GFP_ACCOUNT : 0, in kvm_riscv_gstage_ioremap()
359 end = (gpa + size + PAGE_SIZE - 1) & PAGE_MASK; in kvm_riscv_gstage_ioremap()
362 for (addr = gpa; addr < end; addr += PAGE_SIZE) { in kvm_riscv_gstage_ioremap()
372 spin_lock(&kvm->mmu_lock); in kvm_riscv_gstage_ioremap()
373 ret = gstage_set_pte(kvm, 0, &pcache, addr, &pte); in kvm_riscv_gstage_ioremap()
374 spin_unlock(&kvm->mmu_lock); in kvm_riscv_gstage_ioremap()
386 void kvm_riscv_gstage_iounmap(struct kvm *kvm, gpa_t gpa, unsigned long size) in kvm_riscv_gstage_iounmap() argument
388 spin_lock(&kvm->mmu_lock); in kvm_riscv_gstage_iounmap()
389 gstage_unmap_range(kvm, gpa, size, false); in kvm_riscv_gstage_iounmap()
390 spin_unlock(&kvm->mmu_lock); in kvm_riscv_gstage_iounmap()
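
kvm_riscv_gstage_ioremap()/kvm_riscv_gstage_iounmap() map host MMIO straight into guest physical space outside of any memslot and, as file line 355 shows, use atomic allocations when the caller cannot sleep. A hypothetical caller, assuming the parameters after the (kvm, gpa, ...) prefix shown at file line 346 are hpa, size, writable and in_atomic:

	/* Map one read/write MMIO page at guest physical 'gpa' backed by host
	 * physical 'hpa', then tear it down again; the writable/in_atomic
	 * arguments are assumptions, only the gfp_custom use of in_atomic is
	 * visible in the lines above. */
	ret = kvm_riscv_gstage_ioremap(kvm, gpa, hpa, PAGE_SIZE, true, false);
	if (ret)
		return ret;
	/* ... guest accesses the page via 'gpa' ... */
	kvm_riscv_gstage_iounmap(kvm, gpa, PAGE_SIZE);
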
398 phys_addr_t base_gfn = slot->base_gfn + gfn_offset; in kvm_arch_mmu_enable_log_dirty_pt_masked()
425 gpa_t gpa = slot->base_gfn << PAGE_SHIFT; in kvm_arch_flush_shadow_memslot() local
426 phys_addr_t size = slot->npages << PAGE_SHIFT; in kvm_arch_flush_shadow_memslot()
428 spin_lock(&kvm->mmu_lock); in kvm_arch_flush_shadow_memslot()
429 gstage_unmap_range(kvm, gpa, size, false); in kvm_arch_flush_shadow_memslot()
430 spin_unlock(&kvm->mmu_lock); in kvm_arch_flush_shadow_memslot()
443 if (change != KVM_MR_DELETE && new->flags & KVM_MEM_LOG_DIRTY_PAGES) in kvm_arch_commit_memory_region()
444 gstage_wp_memory_region(kvm, new->id); in kvm_arch_commit_memory_region()
455 int ret = 0; in kvm_arch_prepare_memory_region()
459 return 0; in kvm_arch_prepare_memory_region()
462 * Prevent userspace from creating a memory region outside of the GPA in kvm_arch_prepare_memory_region()
463 * space addressable by the KVM guest GPA space. in kvm_arch_prepare_memory_region()
465 if ((new->base_gfn + new->npages) >= in kvm_arch_prepare_memory_region()
467 return -EFAULT; in kvm_arch_prepare_memory_region()
469 hva = new->userspace_addr; in kvm_arch_prepare_memory_region()
470 size = new->npages << PAGE_SHIFT; in kvm_arch_prepare_memory_region()
472 base_gpa = new->base_gfn << PAGE_SHIFT; in kvm_arch_prepare_memory_region()
473 writable = !(new->flags & KVM_MEM_READONLY); in kvm_arch_prepare_memory_region()
475 mmap_read_lock(current->mm); in kvm_arch_prepare_memory_region()
482-487 (ASCII diagram: a memory region spanning several VMAs, possibly with holes between them) in kvm_arch_prepare_memory_region()
490 struct vm_area_struct *vma = find_vma(current->mm, hva); in kvm_arch_prepare_memory_region()
493 if (!vma || vma->vm_start >= reg_end) in kvm_arch_prepare_memory_region()
497 * Mapping a read-only VMA is only allowed if the in kvm_arch_prepare_memory_region()
498 * memory region is configured as read-only. in kvm_arch_prepare_memory_region()
500 if (writable && !(vma->vm_flags & VM_WRITE)) { in kvm_arch_prepare_memory_region()
501 ret = -EPERM; in kvm_arch_prepare_memory_region()
506 vm_start = max(hva, vma->vm_start); in kvm_arch_prepare_memory_region()
507 vm_end = min(reg_end, vma->vm_end); in kvm_arch_prepare_memory_region()
509 if (vma->vm_flags & VM_PFNMAP) { in kvm_arch_prepare_memory_region()
510 gpa_t gpa = base_gpa + (vm_start - hva); in kvm_arch_prepare_memory_region() local
513 pa = (phys_addr_t)vma->vm_pgoff << PAGE_SHIFT; in kvm_arch_prepare_memory_region()
514 pa += vm_start - vma->vm_start; in kvm_arch_prepare_memory_region()
517 if (new->flags & KVM_MEM_LOG_DIRTY_PAGES) { in kvm_arch_prepare_memory_region()
518 ret = -EINVAL; in kvm_arch_prepare_memory_region()
522 ret = kvm_riscv_gstage_ioremap(kvm, gpa, pa, in kvm_arch_prepare_memory_region()
523 vm_end - vm_start, in kvm_arch_prepare_memory_region()
538 mmap_read_unlock(current->mm); in kvm_arch_prepare_memory_region()
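
For the VM_PFNMAP branch, the gpa/pa arithmetic at file lines 510-514 can be sanity-checked with a small worked example (hypothetical numbers):

	/*
	 * Memslot: userspace_addr hva = 0x7f0000200000, base_gpa = 0x80000000
	 * VMA:     [0x7f0000100000, 0x7f0000300000), VM_PFNMAP, vm_pgoff = 0x12345
	 *
	 *   vm_start = max(hva, vma->vm_start)           = 0x7f0000200000
	 *   gpa      = base_gpa + (vm_start - hva)       = 0x80000000
	 *   pa       = (vm_pgoff << PAGE_SHIFT)
	 *            + (vm_start - vma->vm_start)        = 0x12345000 + 0x100000
	 *                                                = 0x12445000
	 *
	 * so the overlapping part of the VMA is ioremapped into the guest with
	 * guest physical 0x80000000 backed by host physical 0x12445000.
	 */
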
544 if (!kvm->arch.pgd) in kvm_unmap_gfn_range()
547 gstage_unmap_range(kvm, range->start << PAGE_SHIFT, in kvm_unmap_gfn_range()
548 (range->end - range->start) << PAGE_SHIFT, in kvm_unmap_gfn_range()
549 range->may_block); in kvm_unmap_gfn_range()
556 kvm_pfn_t pfn = pte_pfn(range->arg.pte); in kvm_set_spte_gfn()
558 if (!kvm->arch.pgd) in kvm_set_spte_gfn()
561 WARN_ON(range->end - range->start != 1); in kvm_set_spte_gfn()
563 ret = gstage_map_page(kvm, NULL, range->start << PAGE_SHIFT, in kvm_set_spte_gfn()
566 kvm_debug("Failed to map G-stage page (error %d)\n", ret); in kvm_set_spte_gfn()
576 u32 ptep_level = 0; in kvm_age_gfn()
577 u64 size = (range->end - range->start) << PAGE_SHIFT; in kvm_age_gfn()
579 if (!kvm->arch.pgd) in kvm_age_gfn()
584 if (!gstage_get_leaf_entry(kvm, range->start << PAGE_SHIFT, in kvm_age_gfn()
588 return ptep_test_and_clear_young(NULL, 0, ptep); in kvm_age_gfn()
594 u32 ptep_level = 0; in kvm_test_age_gfn()
595 u64 size = (range->end - range->start) << PAGE_SHIFT; in kvm_test_age_gfn()
597 if (!kvm->arch.pgd) in kvm_test_age_gfn()
602 if (!gstage_get_leaf_entry(kvm, range->start << PAGE_SHIFT, in kvm_test_age_gfn()
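
kvm_test_age_gfn() is the non-destructive counterpart of kvm_age_gfn(): once the leaf PTE has been located it only needs to report the Accessed bit, along the lines of this sketch (generic ptep_get()/pte_young() helpers):

	return pte_young(ptep_get(ptep));	/* report 'A' without clearing it */
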
611 gpa_t gpa, unsigned long hva, bool is_write) in kvm_riscv_gstage_map() argument
617 gfn_t gfn = gpa >> PAGE_SHIFT; in kvm_riscv_gstage_map()
619 struct kvm *kvm = vcpu->kvm; in kvm_riscv_gstage_map()
620 struct kvm_mmu_memory_cache *pcache = &vcpu->arch.mmu_page_cache; in kvm_riscv_gstage_map()
621 bool logging = (memslot->dirty_bitmap && in kvm_riscv_gstage_map()
622 !(memslot->flags & KVM_MEM_READONLY)) ? true : false; in kvm_riscv_gstage_map()
628 kvm_err("Failed to topup G-stage cache\n"); in kvm_riscv_gstage_map()
632 mmap_read_lock(current->mm); in kvm_riscv_gstage_map()
634 vma = vma_lookup(current->mm, hva); in kvm_riscv_gstage_map()
636 kvm_err("Failed to find VMA for hva 0x%lx\n", hva); in kvm_riscv_gstage_map()
637 mmap_read_unlock(current->mm); in kvm_riscv_gstage_map()
638 return -EFAULT; in kvm_riscv_gstage_map()
646 if (logging || (vma->vm_flags & VM_PFNMAP)) in kvm_riscv_gstage_map()
650 gfn = (gpa & huge_page_mask(hstate_vma(vma))) >> PAGE_SHIFT; in kvm_riscv_gstage_map()
655 * kvm->mmu_lock. in kvm_riscv_gstage_map()
660 mmu_seq = kvm->mmu_invalidate_seq; in kvm_riscv_gstage_map()
661 mmap_read_unlock(current->mm); in kvm_riscv_gstage_map()
666 kvm_err("Invalid VMA page size 0x%lx\n", vma_pagesize); in kvm_riscv_gstage_map()
667 return -EFAULT; in kvm_riscv_gstage_map()
674 return 0; in kvm_riscv_gstage_map()
677 return -EFAULT; in kvm_riscv_gstage_map()
686 spin_lock(&kvm->mmu_lock); in kvm_riscv_gstage_map()
694 ret = gstage_map_page(kvm, pcache, gpa, hfn << PAGE_SHIFT, in kvm_riscv_gstage_map()
697 ret = gstage_map_page(kvm, pcache, gpa, hfn << PAGE_SHIFT, in kvm_riscv_gstage_map()
702 kvm_err("Failed to map in G-stage\n"); in kvm_riscv_gstage_map()
705 spin_unlock(&kvm->mmu_lock); in kvm_riscv_gstage_map()
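
The mmu_invalidate_seq snapshot at file line 660 together with the mmu_lock section above is the standard KVM race check between translating the faulting GPA and installing the PTE. A sketch of that pattern with the generic helpers (error handling trimmed; the exact flow in kvm_riscv_gstage_map() may differ):

	mmu_seq = kvm->mmu_invalidate_seq;
	/* ... translate gfn to a host pfn, e.g. via gfn_to_pfn_prot() ... */

	spin_lock(&kvm->mmu_lock);
	if (mmu_invalidate_retry(kvm, mmu_seq)) {
		/* An MMU notifier invalidated this range in the meantime; do
		 * not install a possibly stale mapping, let the guest refault. */
		spin_unlock(&kvm->mmu_lock);
		return 0;
	}
	/* ... gstage_map_page() as in the lines above ... */
	spin_unlock(&kvm->mmu_lock);
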
715 if (kvm->arch.pgd != NULL) { in kvm_riscv_gstage_alloc_pgd()
717 return -EINVAL; in kvm_riscv_gstage_alloc_pgd()
723 return -ENOMEM; in kvm_riscv_gstage_alloc_pgd()
724 kvm->arch.pgd = page_to_virt(pgd_page); in kvm_riscv_gstage_alloc_pgd()
725 kvm->arch.pgd_phys = page_to_phys(pgd_page); in kvm_riscv_gstage_alloc_pgd()
727 return 0; in kvm_riscv_gstage_alloc_pgd()
734 spin_lock(&kvm->mmu_lock); in kvm_riscv_gstage_free_pgd()
735 if (kvm->arch.pgd) { in kvm_riscv_gstage_free_pgd()
736 gstage_unmap_range(kvm, 0UL, gstage_gpa_size, false); in kvm_riscv_gstage_free_pgd()
737 pgd = READ_ONCE(kvm->arch.pgd); in kvm_riscv_gstage_free_pgd()
738 kvm->arch.pgd = NULL; in kvm_riscv_gstage_free_pgd()
739 kvm->arch.pgd_phys = 0; in kvm_riscv_gstage_free_pgd()
741 spin_unlock(&kvm->mmu_lock); in kvm_riscv_gstage_free_pgd()
750 struct kvm_arch *k = &vcpu->kvm->arch; in kvm_riscv_gstage_update_hgatp()
752 hgatp |= (READ_ONCE(k->vmid.vmid) << HGATP_VMID_SHIFT) & HGATP_VMID; in kvm_riscv_gstage_update_hgatp()
753 hgatp |= (k->pgd_phys >> PAGE_SHIFT) & HGATP_PPN; in kvm_riscv_gstage_update_hgatp()
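
The two lines above assemble the per-VM hgatp value; the resulting layout (field macros from asm/csr.h, with the mode bits fixed at G-stage mode detection time) is roughly:

	/*
	 *   hgatp  = <detected mode> << HGATP_MODE_SHIFT   (e.g. HGATP_MODE_SV39X4)
	 *          | ((vmid << HGATP_VMID_SHIFT) & HGATP_VMID)
	 *          | ((pgd_phys >> PAGE_SHIFT)   & HGATP_PPN);
	 *   csr_write(CSR_HGATP, hgatp);
	 */
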
764 /* Try Sv57x4 G-stage mode */ in kvm_riscv_gstage_mode_detect()
772 /* Try Sv48x4 G-stage mode */ in kvm_riscv_gstage_mode_detect()
780 csr_write(CSR_HGATP, 0); in kvm_riscv_gstage_mode_detect()
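
The detection above probes each candidate mode by writing it into hgatp and checking whether the CSR retained it; a sketch of one probe, assuming the asm/csr.h accessors and HGATP_MODE_* constants:

	/* Try to enable Sv48x4; an implementation without it keeps a different
	 * mode value in the CSR, so the read-back comparison fails. */
	csr_write(CSR_HGATP, HGATP_MODE_SV48X4 << HGATP_MODE_SHIFT);
	if ((csr_read(CSR_HGATP) >> HGATP_MODE_SHIFT) == HGATP_MODE_SV48X4) {
		/* Sv48x4 available: use four page-table levels for the G-stage. */
	}
	csr_write(CSR_HGATP, 0);	/* leave G-stage translation in Bare mode */
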