xref: /openbmc/linux/virt/kvm/kvm_mm.h (revision 982ed0de)
1*982ed0deSDavid Woodhouse // SPDX-License-Identifier: GPL-2.0-only
2*982ed0deSDavid Woodhouse 
3*982ed0deSDavid Woodhouse #ifndef __KVM_MM_H__
4*982ed0deSDavid Woodhouse #define __KVM_MM_H__ 1
5*982ed0deSDavid Woodhouse 
6*982ed0deSDavid Woodhouse /*
7*982ed0deSDavid Woodhouse  * Architectures can choose whether to use an rwlock or spinlock
8*982ed0deSDavid Woodhouse  * for the mmu_lock.  These macros, for use in common code
9*982ed0deSDavid Woodhouse  * only, avoids using #ifdefs in places that must deal with
10*982ed0deSDavid Woodhouse  * multiple architectures.
11*982ed0deSDavid Woodhouse  */
12*982ed0deSDavid Woodhouse 
#ifdef KVM_HAVE_MMU_RWLOCK
/*
 * mmu_lock is an rwlock: KVM_MMU_LOCK/UNLOCK take it for write
 * (exclusive), while the READ variants allow concurrent readers.
 */
#define KVM_MMU_LOCK_INIT(kvm)		rwlock_init(&(kvm)->mmu_lock)
#define KVM_MMU_LOCK(kvm)		write_lock(&(kvm)->mmu_lock)
#define KVM_MMU_UNLOCK(kvm)		write_unlock(&(kvm)->mmu_lock)
#define KVM_MMU_READ_LOCK(kvm)		read_lock(&(kvm)->mmu_lock)
#define KVM_MMU_READ_UNLOCK(kvm)	read_unlock(&(kvm)->mmu_lock)
#else
/*
 * mmu_lock is a plain spinlock: the READ variants map to the same
 * exclusive spin_lock/spin_unlock, i.e. they are correct but provide
 * no reader concurrency on these architectures.
 */
#define KVM_MMU_LOCK_INIT(kvm)		spin_lock_init(&(kvm)->mmu_lock)
#define KVM_MMU_LOCK(kvm)		spin_lock(&(kvm)->mmu_lock)
#define KVM_MMU_UNLOCK(kvm)		spin_unlock(&(kvm)->mmu_lock)
#define KVM_MMU_READ_LOCK(kvm)		spin_lock(&(kvm)->mmu_lock)
#define KVM_MMU_READ_UNLOCK(kvm)	spin_unlock(&(kvm)->mmu_lock)
#endif /* KVM_HAVE_MMU_RWLOCK */
26*982ed0deSDavid Woodhouse 
/*
 * Resolve a host virtual address to a host page frame number.
 *
 * NOTE(review): semantics hedged — this header only declares the function.
 * Presumably @atomic forbids sleeping during the lookup, @async (if
 * non-NULL) lets the fault be completed asynchronously, @write_fault
 * requests a writable mapping, and *@writable reports whether the
 * resulting pfn is writable — confirm against the definition in kvm_main.c.
 */
kvm_pfn_t hva_to_pfn(unsigned long addr, bool atomic, bool *async,
		     bool write_fault, bool *writable);
29*982ed0deSDavid Woodhouse 
#ifdef CONFIG_HAVE_KVM_PFNCACHE
/*
 * Invalidate gfn_to_pfn caches that overlap the host virtual address
 * range @start..@end.  @may_block indicates whether the caller's context
 * permits sleeping.
 *
 * NOTE(review): presumably invoked from the MMU-notifier
 * invalidate_range_start path (hence the _start suffix) — confirm
 * against the caller in kvm_main.c.
 */
void gfn_to_pfn_cache_invalidate_start(struct kvm *kvm,
				       unsigned long start,
				       unsigned long end,
				       bool may_block);
#else
/* No-op stub when the pfncache is not built in. */
static inline void gfn_to_pfn_cache_invalidate_start(struct kvm *kvm,
						     unsigned long start,
						     unsigned long end,
						     bool may_block)
{
}
#endif /* CONFIG_HAVE_KVM_PFNCACHE */
43*982ed0deSDavid Woodhouse 
44*982ed0deSDavid Woodhouse #endif /* __KVM_MM_H__ */
45