Lines Matching +full:gpa +full:- +full:1
19 * KVM_MMU_CACHE_MIN_PAGES is the number of GPA page table translation levels
23 #define KVM_MMU_CACHE_MIN_PAGES 1
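The matched #define is, in all likelihood, one arm of a configuration-dependent pair: with the PMD (or PUD) level folded away there is one less translation level whose table may need allocating during a fault. A hedged reconstruction of the surrounding block, not a verbatim copy:

#if defined(__PAGETABLE_PMD_FOLDED) || defined(__PAGETABLE_PUD_FOLDED)
#define KVM_MMU_CACHE_MIN_PAGES 1
#else
#define KVM_MMU_CACHE_MIN_PAGES 2
#endif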
30 kvm_mmu_free_memory_cache(&vcpu->arch.mmu_page_cache); in kvm_mmu_free_memory_caches()
34 * kvm_pgd_init() - Initialise KVM GPA page directory.
35 * @page: Pointer to page directory (PGD) for KVM GPA.
37 * Initialise a KVM GPA page directory with pointers to the invalid table, i.e.
59 p[1] = entry; in kvm_pgd_init()
64 p[-3] = entry; in kvm_pgd_init()
65 p[-2] = entry; in kvm_pgd_init()
66 p[-1] = entry; in kvm_pgd_init()
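The p[1]/p[-3]/p[-2]/p[-1] matches above come from an unrolled fill loop. A hedged sketch, assuming the usual MIPS pgd_init pattern (eight entries per iteration, every slot pointed at the invalid next-level table so that unmapped GPAs fault):

static void kvm_pgd_init(void *page)
{
	unsigned long *p, *end;
	unsigned long entry;

#ifdef __PAGETABLE_PMD_FOLDED
	entry = (unsigned long)invalid_pte_table;	/* two-level: next level holds PTEs */
#else
	entry = (unsigned long)invalid_pmd_table;	/* three-level: next level holds PMDs */
#endif

	p = (unsigned long *)page;
	end = p + PTRS_PER_PGD;

	do {
		p[0] = entry;
		p[1] = entry;
		p[2] = entry;
		p[3] = entry;
		p += 8;
		p[-4] = entry;
		p[-3] = entry;
		p[-2] = entry;
		p[-1] = entry;
	} while (p != end);
}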
71 * kvm_pgd_alloc() - Allocate and initialise a KVM GPA page directory.
73 * Allocate a blank KVM GPA page directory (PGD) for representing guest physical
76 * Returns: Pointer to new KVM GPA page directory.
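A minimal sketch of the allocator this kernel-doc describes, assuming the PGD spans PGD_TABLE_ORDER pages (older trees call the constant PGD_ORDER):

pgd_t *kvm_pgd_alloc(void)
{
	pgd_t *ret;

	ret = (pgd_t *)__get_free_pages(GFP_KERNEL, PGD_TABLE_ORDER);
	if (ret)
		kvm_pgd_init(ret);	/* point every entry at the invalid table */

	return ret;
}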
91 * kvm_mips_walk_pgd() - Walk page table with optional allocation.
141 /* Caller must hold kvm->mmu_lock */
146 return kvm_mips_walk_pgd(kvm->arch.gpa_mm.pgd, cache, addr); in kvm_mips_pte_for_gpa()
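kvm_mips_pte_for_gpa() is a thin wrapper; the walk itself descends from the PGD through the intermediate levels down to the PTE, and "optional allocation" means a missing intermediate table is only created when a memory cache is supplied. A hedged sketch of one step of that walk:

	pmd = pmd_offset(pud, addr);
	if (pmd_none(*pmd)) {
		pte_t *new_pte;

		if (!cache)
			return NULL;	/* lookup-only walk: caller must top up a cache and retry */
		new_pte = kvm_mmu_memory_cache_alloc(cache);
		clear_page(new_pte);
		pmd_populate_kernel(NULL, pmd, new_pte);
	}
	return pte_offset_kernel(pmd, addr);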
151 * Flush a range of guest physical address space from the VM's GPA page tables.
159 bool safe_to_remove = (i_min == 0 && i_max == PTRS_PER_PTE - 1); in kvm_mips_flush_gpa_pte()
178 bool safe_to_remove = (i_min == 0 && i_max == PTRS_PER_PMD - 1); in kvm_mips_flush_gpa_pmd()
206 bool safe_to_remove = (i_min == 0 && i_max == PTRS_PER_PUD - 1); in kvm_mips_flush_gpa_pud()
235 bool safe_to_remove = (i_min == 0 && i_max == PTRS_PER_PGD - 1); in kvm_mips_flush_gpa_pgd()
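Each flush level computes safe_to_remove the same way: only when the range spans the whole table (index 0 through PTRS_PER_* - 1) may the parent level free the page backing it. A hedged sketch of the leaf level:

static bool kvm_mips_flush_gpa_pte(pte_t *pte, unsigned long start_gpa,
				   unsigned long end_gpa)
{
	int i_min = pte_index(start_gpa);
	int i_max = pte_index(end_gpa);
	bool safe_to_remove = (i_min == 0 && i_max == PTRS_PER_PTE - 1);
	int i;

	for (i = i_min; i <= i_max; ++i) {
		if (!pte_present(pte[i]))
			continue;

		set_pte(pte + i, __pte(0));
	}
	return safe_to_remove;
}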
258 * kvm_mips_flush_gpa_pt() - Flush a range of guest physical addresses.
260 * @start_gfn: Guest frame number of first page in GPA range to flush.
261 * @end_gfn: Guest frame number of last page in GPA range to flush.
263 * Flushes a range of GPA mappings from the GPA page tables.
265 * The caller must hold the @kvm->mmu_lock spinlock.
272 return kvm_mips_flush_gpa_pgd(kvm->arch.gpa_mm.pgd, in kvm_mips_flush_gpa_pt()
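A hedged sketch of a typical caller, based on the memslot-teardown pattern in arch/mips/kvm/mips.c: the whole slot is flushed under mmu_lock, and end_gfn is inclusive, matching the kernel-doc above.

void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
				   struct kvm_memory_slot *slot)
{
	spin_lock(&kvm->mmu_lock);
	/* Flush slot from GPA */
	kvm_mips_flush_gpa_pt(kvm, slot->base_gfn,
			      slot->base_gfn + slot->npages - 1);
	/* Let implementation do the rest */
	kvm_mips_callbacks->flush_shadow_memslot(kvm, slot);
	spin_unlock(&kvm->mmu_lock);
}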
296 ret = 1; \
376 * GPA page table to allow dirty page tracking.
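The "ret = 1;" match at line 296 above sits inside the BUILD_PTE_RANGE_OP() macro, which stamps out one walker per table level for each operation (mkclean, mkold). A simplified, hedged sketch of what the leaf level expands to for the mkclean case:

static int kvm_mips_mkclean_pte(pte_t *pte, unsigned long start,
				unsigned long end)
{
	int ret = 0;
	int i_min = pte_index(start);
	int i_max = pte_index(end);
	int i;
	pte_t old, new;

	for (i = i_min; i <= i_max; ++i) {
		if (!pte_present(pte[i]))
			continue;

		old = pte[i];
		new = pte_mkclean(old);
		if (pte_val(new) == pte_val(old))
			continue;
		set_pte(pte + i, new);
		ret = 1;	/* at least one mapping changed: TLBs must be flushed */
	}
	return ret;
}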
382 * kvm_mips_mkclean_gpa_pt() - Make a range of guest physical addresses clean. in BUILD_PTE_RANGE_OP()
384 * @start_gfn: Guest frame number of first page in GPA range to flush. in BUILD_PTE_RANGE_OP()
385 * @end_gfn: Guest frame number of last page in GPA range to flush. in BUILD_PTE_RANGE_OP()
387 * Make a range of GPA mappings clean so that guest writes will fault and in BUILD_PTE_RANGE_OP()
390 * The caller must hold the @kvm->mmu_lock spinlock. in BUILD_PTE_RANGE_OP()
392 * Returns: Whether any GPA mappings were modified, which would require in BUILD_PTE_RANGE_OP()
398 return kvm_mips_mkclean_pgd(kvm->arch.gpa_mm.pgd, in BUILD_PTE_RANGE_OP()
404 * kvm_arch_mmu_enable_log_dirty_pt_masked() - write protect dirty pages
412 * acquire @kvm->mmu_lock.
418 gfn_t base_gfn = slot->base_gfn + gfn_offset; in kvm_arch_mmu_enable_log_dirty_pt_masked()
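A hedged sketch of the function body around the base_gfn match: the dirty-log mask selects a sub-range of the slot, and write protection is done by making those GPAs clean.

void kvm_arch_mmu_enable_log_dirty_pt_masked(struct kvm *kvm,
		struct kvm_memory_slot *slot,
		gfn_t gfn_offset, unsigned long mask)
{
	gfn_t base_gfn = slot->base_gfn + gfn_offset;
	gfn_t start = base_gfn + __ffs(mask);	/* first set bit in the mask */
	gfn_t end = base_gfn + __fls(mask);	/* last set bit in the mask */

	kvm_mips_mkclean_gpa_pt(kvm, start, end);
}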
428 * VM's GPA page table to allow detection of commonly used pages.
436 return kvm_mips_mkold_pgd(kvm->arch.gpa_mm.pgd, in BUILD_PTE_RANGE_OP()
443 kvm_mips_flush_gpa_pt(kvm, range->start, range->end); in kvm_unmap_gfn_range()
449 gpa_t gpa = range->start << PAGE_SHIFT; in kvm_set_spte_gfn() local
450 pte_t hva_pte = range->arg.pte; in kvm_set_spte_gfn()
451 pte_t *gpa_pte = kvm_mips_pte_for_gpa(kvm, NULL, gpa); in kvm_set_spte_gfn()
459 if (range->slot->flags & KVM_MEM_LOG_DIRTY_PAGES && !pte_dirty(old_pte)) in kvm_set_spte_gfn()
461 else if (range->slot->flags & KVM_MEM_READONLY) in kvm_set_spte_gfn()
479 return kvm_mips_mkold_gpa_pt(kvm, range->start, range->end); in kvm_age_gfn()
484 gpa_t gpa = range->start << PAGE_SHIFT; in kvm_test_age_gfn() local
485 pte_t *gpa_pte = kvm_mips_pte_for_gpa(kvm, NULL, gpa); in kvm_test_age_gfn()
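A hedged sketch of the aging handlers these matches come from: kvm_age_gfn() clears the young bits over the range via the mkold walker, while kvm_test_age_gfn() only inspects the single PTE.

bool kvm_test_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
{
	gpa_t gpa = range->start << PAGE_SHIFT;
	pte_t *gpa_pte = kvm_mips_pte_for_gpa(kvm, NULL, gpa);

	if (!gpa_pte)
		return false;		/* nothing mapped here, so not young */
	return pte_young(*gpa_pte);
}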
493 * _kvm_mips_map_page_fast() - Fast path GPA fault handler.
495 * @gpa: Guest physical address of fault.
497 * @out_entry: New PTE for @gpa (written on success unless NULL).
498 * @out_buddy: New PTE for @gpa's buddy (written on success unless
501 * Perform fast path GPA fault handling, doing all that can be done without
507 * -EFAULT on failure due to absent GPA mapping or write to
508 * read-only page, in which case KVM must be consulted.
510 static int _kvm_mips_map_page_fast(struct kvm_vcpu *vcpu, unsigned long gpa, in _kvm_mips_map_page_fast() argument
514 struct kvm *kvm = vcpu->kvm; in _kvm_mips_map_page_fast()
515 gfn_t gfn = gpa >> PAGE_SHIFT; in _kvm_mips_map_page_fast()
521 spin_lock(&kvm->mmu_lock); in _kvm_mips_map_page_fast()
523 /* Fast path - just check GPA page table for an existing entry */ in _kvm_mips_map_page_fast()
524 ptep = kvm_mips_pte_for_gpa(kvm, NULL, gpa); in _kvm_mips_map_page_fast()
526 ret = -EFAULT; in _kvm_mips_map_page_fast()
539 ret = -EFAULT; in _kvm_mips_map_page_fast()
556 spin_unlock(&kvm->mmu_lock); in _kvm_mips_map_page_fast()
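A condensed, hedged sketch of the fast path around these matches (struct-page accessed/dirty bookkeeping omitted): under mmu_lock, look the GPA up in the existing table, fail with -EFAULT if there is no present mapping or if a write fault hits a clean PTE that is not writable, otherwise update young/dirty in place and return the PTE and its TLB buddy.

	spin_lock(&kvm->mmu_lock);

	/* Fast path - just check GPA page table for an existing entry */
	ptep = kvm_mips_pte_for_gpa(kvm, NULL, gpa);
	if (!ptep || !pte_present(*ptep)) {
		ret = -EFAULT;			/* no mapping: fall back to the slow path */
		goto out;
	}

	/* Track access to pages marked old */
	if (!pte_young(*ptep))
		set_pte(ptep, pte_mkyoung(*ptep));

	if (write_fault && !pte_dirty(*ptep)) {
		if (!pte_write(*ptep)) {
			ret = -EFAULT;		/* write protected: the slow path must decide */
			goto out;
		}
		/* Track dirtying of writeable pages */
		set_pte(ptep, pte_mkdirty(*ptep));
		mark_page_dirty(kvm, gfn);
	}

	if (out_entry)
		*out_entry = *ptep;
	if (out_buddy)
		*out_buddy = *ptep_buddy(ptep);	/* even/odd pair sharing one TLB entry */
out:
	spin_unlock(&kvm->mmu_lock);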
563 * kvm_mips_map_page() - Map a guest physical page.
565 * @gpa: Guest physical address of fault.
567 * @out_entry: New PTE for @gpa (written on success unless NULL).
568 * @out_buddy: New PTE for @gpa's buddy (written on success unless
571 * Handle GPA faults by creating a new GPA mapping (or updating an existing
575 * asking KVM for the corresponding PFN, and creating a mapping in the GPA page
582 * -EFAULT if there is no memory region at @gpa or a write was
583 * attempted to a read-only memory region. This is usually handled
586 static int kvm_mips_map_page(struct kvm_vcpu *vcpu, unsigned long gpa, in kvm_mips_map_page() argument
590 struct kvm *kvm = vcpu->kvm; in kvm_mips_map_page()
591 struct kvm_mmu_memory_cache *memcache = &vcpu->arch.mmu_page_cache; in kvm_mips_map_page()
592 gfn_t gfn = gpa >> PAGE_SHIFT; in kvm_mips_map_page()
601 srcu_idx = srcu_read_lock(&kvm->srcu); in kvm_mips_map_page()
602 err = _kvm_mips_map_page_fast(vcpu, gpa, write_fault, out_entry, in kvm_mips_map_page()
617 mmu_seq = kvm->mmu_invalidate_seq; in kvm_mips_map_page()
631 /* Slow path - ask KVM core whether we can access this GPA */ in kvm_mips_map_page()
634 err = -EFAULT; in kvm_mips_map_page()
638 spin_lock(&kvm->mmu_lock); in kvm_mips_map_page()
646 spin_unlock(&kvm->mmu_lock); in kvm_mips_map_page()
652 ptep = kvm_mips_pte_for_gpa(kvm, memcache, gpa); in kvm_mips_map_page()
675 spin_unlock(&kvm->mmu_lock); in kvm_mips_map_page()
679 srcu_read_unlock(&kvm->srcu, srcu_idx); in kvm_mips_map_page()
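A condensed, hedged sketch of the slow path around these matches (protection-bit construction simplified to PAGE_SHARED): retry until the pfn obtained from the KVM core survives an MMU-notifier invalidation window, then install the PTE using tables allocated from the per-vCPU memory cache.

	/* We need a minimum of cached pages ready for page table creation */
	err = kvm_mmu_topup_memory_cache(memcache, KVM_MMU_CACHE_MIN_PAGES);
	if (err)
		goto out;

retry:
	mmu_seq = kvm->mmu_invalidate_seq;
	smp_rmb();	/* read mmu_invalidate_seq before the pfn lookup below */

	/* Slow path - ask KVM core whether we can access this GPA */
	pfn = gfn_to_pfn_prot(kvm, gfn, write_fault, &writeable);
	if (is_error_noslot_pfn(pfn)) {
		err = -EFAULT;
		goto out;
	}

	spin_lock(&kvm->mmu_lock);
	/* Check if an invalidation has taken place since we got pfn */
	if (mmu_invalidate_retry(kvm, mmu_seq)) {
		spin_unlock(&kvm->mmu_lock);
		kvm_release_pfn_clean(pfn);
		goto retry;
	}

	/* Ensure page tables are allocated (from memcache, so this cannot fail) */
	ptep = kvm_mips_pte_for_gpa(kvm, memcache, gpa);
	set_pte(ptep, pfn_pte(pfn, PAGE_SHARED));	/* real code derives prot bits from the fault */

	spin_unlock(&kvm->mmu_lock);
	kvm_release_pfn_clean(pfn);
out:
	srcu_read_unlock(&kvm->srcu, srcu_idx);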
698 * kvm_mips_migrate_count() - Migrate timer.
710 if (hrtimer_cancel(&vcpu->arch.comparecount_timer)) in kvm_mips_migrate_count()
711 hrtimer_restart(&vcpu->arch.comparecount_timer); in kvm_mips_migrate_count()
723 vcpu->cpu = cpu; in kvm_arch_vcpu_load()
724 if (vcpu->arch.last_sched_cpu != cpu) { in kvm_arch_vcpu_load()
725 kvm_debug("[%d->%d]KVM VCPU[%d] switch\n", in kvm_arch_vcpu_load()
726 vcpu->arch.last_sched_cpu, cpu, vcpu->vcpu_id); in kvm_arch_vcpu_load()
736 kvm_mips_callbacks->vcpu_load(vcpu, cpu); in kvm_arch_vcpu_load()
750 vcpu->arch.last_sched_cpu = cpu; in kvm_arch_vcpu_put()
751 vcpu->cpu = -1; in kvm_arch_vcpu_put()
754 kvm_mips_callbacks->vcpu_put(vcpu, cpu); in kvm_arch_vcpu_put()
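For completeness, a hedged sketch of the put side these last matches come from: with interrupts disabled, record the CPU the vCPU last ran on, mark it as not resident anywhere, and let the implementation callback save guest register state.

void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
	unsigned long flags;
	int cpu;

	local_irq_save(flags);

	cpu = smp_processor_id();
	vcpu->arch.last_sched_cpu = cpu;
	vcpu->cpu = -1;

	/* save guest state in registers */
	kvm_mips_callbacks->vcpu_put(vcpu, cpu);

	local_irq_restore(flags);
}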