/*
 * Copyright (C) 2012,2013 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#ifndef __ARM64_KVM_MMU_H__
#define __ARM64_KVM_MMU_H__

#include <asm/page.h>
#include <asm/memory.h>

/*
 * As we only have the TTBR0_EL2 register, we cannot express
 * "negative" addresses. This makes it impossible to directly share
 * mappings with the kernel.
 *
 * Instead, give the HYP mode its own VA region at a fixed offset from
 * the kernel by just masking the top bits (which are all ones for a
 * kernel address).
 */
#define HYP_PAGE_OFFSET_SHIFT	VA_BITS
#define HYP_PAGE_OFFSET_MASK	((UL(1) << HYP_PAGE_OFFSET_SHIFT) - 1)
#define HYP_PAGE_OFFSET		(PAGE_OFFSET & HYP_PAGE_OFFSET_MASK)
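
/*
 * Worked example (illustrative only, assuming a 4KB-page kernel with
 * VA_BITS == 39, where PAGE_OFFSET is 0xffffffc000000000):
 *
 *   HYP_PAGE_OFFSET_MASK = (1UL << 39) - 1    = 0x0000007fffffffff
 *   HYP_PAGE_OFFSET      = PAGE_OFFSET & MASK = 0x0000004000000000
 *
 * i.e. a kernel VA becomes a HYP VA by clearing bits [63:39].
 */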

/*
 * Our virtual mapping for the idmap-ed MMU-enable code. Must be
 * shared across all the page-tables. Conveniently, we use the last
 * possible page, where no kernel mapping will ever exist.
 */
#define TRAMPOLINE_VA		(HYP_PAGE_OFFSET_MASK & PAGE_MASK)
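
/*
 * Illustrative value (same assumptions as above, VA_BITS == 39 and
 * 4KB pages):
 *
 *   TRAMPOLINE_VA = 0x0000007fffffffff & ~0xfff = 0x0000007ffffff000
 *
 * i.e. the highest page in the HYP VA range.
 */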

#ifdef __ASSEMBLY__

/*
 * Convert a kernel VA into a HYP VA.
 * reg: VA to be converted.
 */
.macro kern_hyp_va	reg
	and	\reg, \reg, #HYP_PAGE_OFFSET_MASK
.endm
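
/*
 * Illustrative use from HYP assembly (a sketch, not a quote from the
 * actual call sites in the arm64 KVM HYP code):
 *
 *	ldr	x0, =some_kernel_symbol
 *	kern_hyp_va x0		// x0 now holds the equivalent HYP VA
 */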

#else

#include <asm/cachetype.h>
#include <asm/cacheflush.h>

/*
 * Convert a kernel VA into a HYP VA: rebase from the kernel linear
 * map onto the HYP VA region.
 */
#define KERN_TO_HYP(kva)	((unsigned long)kva - PAGE_OFFSET + HYP_PAGE_OFFSET)

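/*
 * Worked example (illustrative, same VA_BITS == 39 assumptions as
 * above): for kva = 0xffffffc000001000,
 *
 *   KERN_TO_HYP(kva) = kva - 0xffffffc000000000 + 0x0000004000000000
 *                    = 0x0000004000001000
 *
 * which matches what the kern_hyp_va masking produces, since the low
 * VA_BITS bits of PAGE_OFFSET equal HYP_PAGE_OFFSET.
 */
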
/*
 * Align KVM with the kernel's view of physical memory. This should
 * be a 40-bit IPA, with the PGD being 8KB-aligned in the 4KB page
 * configuration.
 */
#define KVM_PHYS_SHIFT	PHYS_MASK_SHIFT
#define KVM_PHYS_SIZE	(1UL << KVM_PHYS_SHIFT)
#define KVM_PHYS_MASK	(KVM_PHYS_SIZE - 1UL)

/* Make sure we get the right size, and thus the right alignment */
#define PTRS_PER_S2_PGD (1 << (KVM_PHYS_SHIFT - PGDIR_SHIFT))
#define S2_PGD_ORDER	get_order(PTRS_PER_S2_PGD * sizeof(pgd_t))
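
/*
 * Sizing check (illustrative, assuming KVM_PHYS_SHIFT == 40 and a
 * 4KB-page, 39-bit-VA kernel where PGDIR_SHIFT == 30):
 *
 *   PTRS_PER_S2_PGD = 1 << (40 - 30)  = 1024
 *   1024 * sizeof(pgd_t) = 1024 * 8   = 8KB
 *   S2_PGD_ORDER = get_order(8KB)     = 1   (two contiguous pages)
 *
 * which gives the 8KB-aligned stage-2 PGD mentioned above.
 */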

int create_hyp_mappings(void *from, void *to);
int create_hyp_io_mappings(void *from, void *to, phys_addr_t);
void free_boot_hyp_pgd(void);
void free_hyp_pgds(void);

int kvm_alloc_stage2_pgd(struct kvm *kvm);
void kvm_free_stage2_pgd(struct kvm *kvm);
int kvm_phys_addr_ioremap(struct kvm *kvm, phys_addr_t guest_ipa,
			  phys_addr_t pa, unsigned long size);

int kvm_handle_guest_abort(struct kvm_vcpu *vcpu, struct kvm_run *run);

void kvm_mmu_free_memory_caches(struct kvm_vcpu *vcpu);

phys_addr_t kvm_mmu_get_httbr(void);
phys_addr_t kvm_mmu_get_boot_httbr(void);
phys_addr_t kvm_get_idmap_vector(void);
int kvm_mmu_init(void);
void kvm_clear_hyp_idmap(void);
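
/*
 * Illustrative caller (a sketch of how the common KVM/ARM init code
 * uses the HYP mapping helpers; not a verbatim quote):
 *
 *	err = create_hyp_mappings(kvm, kvm + 1);
 *	if (err)
 *		return err;
 */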

#define	kvm_set_pte(ptep, pte)		set_pte(ptep, pte)
#define	kvm_set_pmd(pmdp, pmd)		set_pmd(pmdp, pmd)

/*
 * Decode the ESR to decide whether an abort was caused by a write:
 * instruction aborts are never writes; for data aborts with a valid
 * syndrome (ISV set), the WnR bit distinguishes reads from writes;
 * anything else is treated as a write.
 */
static inline bool kvm_is_write_fault(unsigned long esr)
{
	unsigned long esr_ec = esr >> ESR_EL2_EC_SHIFT;

	if (esr_ec == ESR_EL2_EC_IABT)
		return false;

	if ((esr & ESR_EL2_ISV) && !(esr & ESR_EL2_WNR))
		return false;

	return true;
}
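
/*
 * Illustrative decode (bit positions per the ARMv8 ARM, assumed here
 * rather than defined in this header: EC in ESR[31:26], ISV at bit
 * 24, WnR at bit 6):
 *
 *   esr = (0x24 << 26) | (1 << 24) | (1 << 6)
 *
 * is a data abort from a lower EL with a valid syndrome and WnR set,
 * so kvm_is_write_fault() returns true.
 */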

/*
 * Page-table walks are coherent with the data caches on arm64, so
 * the cleaning hooks required by the 32-bit port are no-ops here.
 */
static inline void kvm_clean_pgd(pgd_t *pgd) {}
static inline void kvm_clean_pmd_entry(pmd_t *pmd) {}
static inline void kvm_clean_pte(pte_t *pte) {}
static inline void kvm_clean_pte_entry(pte_t *pte) {}

/* Upgrade a stage-2 PTE to read/write */
static inline void kvm_set_s2pte_writable(pte_t *pte)
{
	pte_val(*pte) |= PTE_S2_RDWR;
}

/* Upgrade a stage-2 PMD (huge page) entry to read/write */
static inline void kvm_set_s2pmd_writable(pmd_t *pmd)
{
	pmd_val(*pmd) |= PMD_S2_RDWR;
}
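
/*
 * Illustrative caller (a sketch of the stage-2 fault path, not a
 * verbatim quote; `writable`, `new_pte` and `pfn` are assumed
 * locals of that handler):
 *
 *	if (writable) {
 *		kvm_set_s2pte_writable(&new_pte);
 *		kvm_set_pfn_dirty(pfn);
 *	}
 */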

#define kvm_pgd_addr_end(addr, end)	pgd_addr_end(addr, end)
#define kvm_pud_addr_end(addr, end)	pud_addr_end(addr, end)
#define kvm_pmd_addr_end(addr, end)	pmd_addr_end(addr, end)

/*
 * A page-table page is empty when only the initial allocation
 * reference is left: each live entry holds an extra reference taken
 * with get_page() when it is installed.
 */
static inline bool kvm_page_empty(void *ptr)
{
	struct page *ptr_page = virt_to_page(ptr);
	return page_count(ptr_page) == 1;
}
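
/*
 * Illustrative lifecycle (a sketch of the stage-2 unmap path, not a
 * verbatim quote):
 *
 *	kvm_set_pte(pte, __pte(0));	// clear the entry
 *	put_page(virt_to_page(pte));	// drop its table reference
 *	if (kvm_pte_table_empty(pte))	// was that the last entry?
 *		// ...free the now-empty table page
 */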

#define kvm_pte_table_empty(ptep) kvm_page_empty(ptep)
#ifndef CONFIG_ARM64_64K_PAGES
#define kvm_pmd_table_empty(pmdp) kvm_page_empty(pmdp)
#else
/*
 * With 64KB pages the PMD level is folded, so there is never a
 * standalone PMD table to free; the same holds for the PUD level in
 * the configurations supported here.
 */
#define kvm_pmd_table_empty(pmdp) (0)
#endif
#define kvm_pud_table_empty(pudp) (0)

struct kvm;

#define kvm_flush_dcache_to_poc(a,l)	__flush_dcache_area((a), (l))

/*
 * Check SCTLR_EL1.M (bit 0, MMU enable) and SCTLR_EL1.C (bit 2, data
 * cache enable): the guest only has its caches on when both are set,
 * hence the 0b101 mask.
 */
static inline bool vcpu_has_cache_enabled(struct kvm_vcpu *vcpu)
{
	return (vcpu_sys_reg(vcpu, SCTLR_EL1) & 0b101) == 0b101;
}
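
/*
 * Illustrative values (SCTLR_EL1 bit layout per the ARMv8 ARM):
 *
 *   SCTLR_EL1 = ...0b0101  (M=1, C=1)  ->  returns true
 *   SCTLR_EL1 = ...0b0100  (M=0, C=1)  ->  returns false
 */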

/*
 * Make a guest page coherent for the guest's benefit: clean the data
 * cache to PoC if the guest runs with its caches off, then maintain
 * the instruction cache according to its type.
 */
static inline void coherent_cache_guest_page(struct kvm_vcpu *vcpu, hva_t hva,
					     unsigned long size)
{
	if (!vcpu_has_cache_enabled(vcpu))
		kvm_flush_dcache_to_poc((void *)hva, size);

	if (!icache_is_aliasing()) {		/* PIPT */
		flush_icache_range(hva, hva + size);
	} else if (!icache_is_aivivt()) {	/* non-ASID-tagged VIVT */
		/* any kind of VIPT cache */
		__flush_icache_all();
	}
}
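
/*
 * Illustrative caller (a sketch of the stage-2 fault handler, not a
 * verbatim quote), run before installing the new mapping:
 *
 *	coherent_cache_guest_page(vcpu, hva, PAGE_SIZE);
 */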

#define kvm_virt_to_phys(x)		__virt_to_phys((unsigned long)(x))

void stage2_flush_vm(struct kvm *kvm);

#endif /* __ASSEMBLY__ */
#endif /* __ARM64_KVM_MMU_H__ */