/*
 * arch/arm64/include/asm/kvm_mmu.h
 *
 * Copyright (C) 2012,2013 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#ifndef __ARM64_KVM_MMU_H__
#define __ARM64_KVM_MMU_H__

#include <asm/page.h>
#include <asm/memory.h>
#include <asm/cpufeature.h>

/*
 * As ARMv8.0 only has the TTBR0_EL2 register, we cannot express
 * "negative" addresses. This makes it impossible to directly share
 * mappings with the kernel.
 *
 * Instead, give the HYP mode its own VA region at a fixed offset from
 * the kernel by just masking the top bits (which are all ones for a
 * kernel address). We need to find out how many bits to mask.
 *
 * We want to build a set of page tables that cover both parts of the
 * idmap (the trampoline page used to initialize EL2), and our normal
 * runtime VA space, at the same time.
 *
 * Given that the kernel uses VA_BITS for its entire address space,
 * and that half of that space (VA_BITS - 1) is used for the linear
 * mapping, we can also limit the EL2 space to (VA_BITS - 1).
 *
 * The main question is "Within the VA_BITS space, does EL2 use the
 * top or the bottom half of that space to shadow the kernel's linear
 * mapping?". As we need to idmap the trampoline page, this is
 * determined by the range in which this page lives.
 *
 * If the page is in the bottom half, we have to use the top half. If
 * the page is in the top half, we have to use the bottom half:
 *
 * T = __pa_symbol(__hyp_idmap_text_start)
 * if (T & BIT(VA_BITS - 1))
 *	HYP_VA_MIN = 0  //idmap in upper half
 * else
 *	HYP_VA_MIN = 1 << (VA_BITS - 1)
 * HYP_VA_MAX = HYP_VA_MIN + (1 << (VA_BITS - 1)) - 1
 *
 * This of course assumes that the trampoline page exists within the
 * VA_BITS range. If it doesn't, then it means we're in the odd case
 * where the kernel idmap (as well as HYP) uses more levels than the
 * kernel runtime page tables (as seen when the kernel is configured
 * for 4K pages, a 39-bit VA, and yet memory lives just above that
 * limit, forcing the idmap to use 4 levels of page tables while the
 * kernel itself only uses 3). In this particular case, it doesn't
 * matter which side of VA_BITS we use, as we're guaranteed not to
 * conflict with anything.
 *
 * When using VHE, there are no separate hyp mappings and all KVM
 * functionality is already mapped as part of the main kernel
 * mappings, so none of this applies in that case.
 */
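
/*
 * Illustrative example (not part of the original reasoning): on a
 * 4K-page, 39-bit VA kernel with __hyp_idmap_text_start at physical
 * 0x40_0012_0000, T & BIT(38) is non-zero (BIT(38) == 0x40_0000_0000),
 * so the idmap sits in the upper half and HYP takes the lower one:
 *
 *	HYP_VA_MIN = 0
 *	HYP_VA_MAX = (1 << 38) - 1 = 0x3f_ffff_ffff
 */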

#ifdef __ASSEMBLY__

#include <asm/alternative.h>

/*
 * Convert a kernel VA into a HYP VA.
 * reg: VA to be converted.
 *
 * The actual code generation takes place in kvm_update_va_mask, and
 * the instructions below are only there to reserve the space and
 * perform the register allocation (kvm_update_va_mask uses the
 * specific registers encoded in the instructions).
 */
.macro kern_hyp_va	reg
alternative_cb kvm_update_va_mask
	and     \reg, \reg, #1		/* mask with va_mask */
	ror	\reg, \reg, #1		/* rotate to the first tag bit */
	add	\reg, \reg, #0		/* insert the low 12 bits of the tag */
	add	\reg, \reg, #0, lsl 12	/* insert the top 12 bits of the tag */
	ror	\reg, \reg, #63		/* rotate back */
alternative_cb_end
.endm
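
/*
 * Illustrative (and hedged) usage from assembly code; the symbol name
 * is hypothetical:
 *
 *	adr_l	x0, some_kernel_object
 *	kern_hyp_va x0			// x0 now holds the HYP alias
 *
 * At boot, kvm_update_va_mask rewrites the five instructions above
 * with the real mask and tag values; as written here, the adds insert
 * a zero tag and are effectively no-ops.
 */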

#else

#include <asm/pgalloc.h>
#include <asm/cache.h>
#include <asm/cacheflush.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>

void kvm_update_va_mask(struct alt_instr *alt,
			__le32 *origptr, __le32 *updptr, int nr_inst);

static inline unsigned long __kern_hyp_va(unsigned long v)
{
	asm volatile(ALTERNATIVE_CB("and %0, %0, #1\n"
				    "ror %0, %0, #1\n"
				    "add %0, %0, #0\n"
				    "add %0, %0, #0, lsl 12\n"
				    "ror %0, %0, #63\n",
				    kvm_update_va_mask)
		     : "+r" (v));
	return v;
}

#define kern_hyp_va(v)	((typeof(v))(__kern_hyp_va((unsigned long)(v))))
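
/*
 * A minimal usage sketch (illustrative; "vcpu" is whatever kernel
 * pointer needs an EL2 alias):
 *
 *	struct kvm_vcpu *hyp_vcpu = kern_hyp_va(vcpu);
 *
 * The typeof() cast keeps the conversion transparent to pointer types.
 */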

/*
 * Obtain the PC-relative address of a kernel symbol
 * s: symbol
 *
 * The goal of this macro is to return a symbol's address based on a
 * PC-relative computation, as opposed to loading the VA from a
 * constant pool or something similar. This works well for HYP, as an
 * absolute VA is guaranteed to be wrong. Only use this if trying to
 * obtain the address of a symbol (i.e. not something you obtained by
 * following a pointer).
 */
#define hyp_symbol_addr(s)						\
	({								\
		typeof(s) *addr;					\
		asm("adrp	%0, %1\n"				\
		    "add	%0, %0, :lo12:%1\n"			\
		    : "=r" (addr) : "S" (&s));				\
		addr;							\
	})
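
/*
 * Usage sketch (the symbol is hypothetical): from HYP, compute a
 * symbol's address relative to the PC instead of using its kernel VA,
 * which would be meaningless at EL2:
 *
 *	extern u64 some_hyp_counter;
 *	u64 *p = hyp_symbol_addr(some_hyp_counter);
 */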

/*
 * The default IPA size is 40 bits; the size actually used by a given
 * VM is read back from its VTCR_EL2 via kvm_phys_shift() below.
 */
#define KVM_PHYS_SHIFT	(40)

#define kvm_phys_shift(kvm)		VTCR_EL2_IPA(kvm->arch.vtcr)
#define kvm_phys_size(kvm)		(_AC(1, ULL) << kvm_phys_shift(kvm))
#define kvm_phys_mask(kvm)		(kvm_phys_size(kvm) - _AC(1, ULL))
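
/*
 * Worked example (illustrative): for a VM configured with a 40-bit
 * IPA space, kvm_phys_shift(kvm) == 40, so:
 *
 *	kvm_phys_size(kvm) == 1ULL << 40	(1TB)
 *	kvm_phys_mask(kvm) == 0xff_ffff_ffff
 */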

static inline bool kvm_page_empty(void *ptr)
{
	struct page *ptr_page = virt_to_page(ptr);

	/*
	 * One reference is held per entry installed in a page-table
	 * page, on top of the allocation's own reference, so a count
	 * of 1 means the table is empty.
	 */
	return page_count(ptr_page) == 1;
}

#include <asm/stage2_pgtable.h>

int create_hyp_mappings(void *from, void *to, pgprot_t prot);
int create_hyp_io_mappings(phys_addr_t phys_addr, size_t size,
			   void __iomem **kaddr,
			   void __iomem **haddr);
int create_hyp_exec_mappings(phys_addr_t phys_addr, size_t size,
			     void **haddr);
void free_hyp_pgds(void);

void stage2_unmap_vm(struct kvm *kvm);
int kvm_alloc_stage2_pgd(struct kvm *kvm);
void kvm_free_stage2_pgd(struct kvm *kvm);
int kvm_phys_addr_ioremap(struct kvm *kvm, phys_addr_t guest_ipa,
			  phys_addr_t pa, unsigned long size, bool writable);

int kvm_handle_guest_abort(struct kvm_vcpu *vcpu, struct kvm_run *run);

void kvm_mmu_free_memory_caches(struct kvm_vcpu *vcpu);

phys_addr_t kvm_mmu_get_httbr(void);
phys_addr_t kvm_get_idmap_vector(void);
int kvm_mmu_init(void);
void kvm_clear_hyp_idmap(void);

#define kvm_mk_pmd(ptep)					\
	__pmd(__phys_to_pmd_val(__pa(ptep)) | PMD_TYPE_TABLE)
#define kvm_mk_pud(pmdp)					\
	__pud(__phys_to_pud_val(__pa(pmdp)) | PMD_TYPE_TABLE)
#define kvm_mk_pgd(pudp)					\
	__pgd(__phys_to_pgd_val(__pa(pudp)) | PUD_TYPE_TABLE)
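
/*
 * The kvm_mk_* helpers above compose next-level table descriptors:
 * each takes the kernel VA of a table page and returns an entry of the
 * form (physical address | TYPE_TABLE). A hedged sketch, where
 * "pte_table" stands for a freshly allocated (hypothetical) pte page:
 *
 *	pmd_t pmd = kvm_mk_pmd(pte_table);
 */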

#define kvm_set_pud(pudp, pud)		set_pud(pudp, pud)

#define kvm_pfn_pte(pfn, prot)		pfn_pte(pfn, prot)
#define kvm_pfn_pmd(pfn, prot)		pfn_pmd(pfn, prot)
#define kvm_pfn_pud(pfn, prot)		pfn_pud(pfn, prot)

#define kvm_pud_pfn(pud)		pud_pfn(pud)

#define kvm_pmd_mkhuge(pmd)		pmd_mkhuge(pmd)
#define kvm_pud_mkhuge(pud)		pud_mkhuge(pud)

static inline pte_t kvm_s2pte_mkwrite(pte_t pte)
{
	pte_val(pte) |= PTE_S2_RDWR;
	return pte;
}

static inline pmd_t kvm_s2pmd_mkwrite(pmd_t pmd)
{
	pmd_val(pmd) |= PMD_S2_RDWR;
	return pmd;
}

static inline pud_t kvm_s2pud_mkwrite(pud_t pud)
{
	pud_val(pud) |= PUD_S2_RDWR;
	return pud;
}

static inline pte_t kvm_s2pte_mkexec(pte_t pte)
{
	pte_val(pte) &= ~PTE_S2_XN;
	return pte;
}

static inline pmd_t kvm_s2pmd_mkexec(pmd_t pmd)
{
	pmd_val(pmd) &= ~PMD_S2_XN;
	return pmd;
}

static inline pud_t kvm_s2pud_mkexec(pud_t pud)
{
	pud_val(pud) &= ~PUD_S2_XN;
	return pud;
}

static inline void kvm_set_s2pte_readonly(pte_t *ptep)
{
	pteval_t old_pteval, pteval;

	pteval = READ_ONCE(pte_val(*ptep));
	do {
		old_pteval = pteval;
		pteval &= ~PTE_S2_RDWR;
		pteval |= PTE_S2_RDONLY;
		/*
		 * cmpxchg returns the value it found; if that differs
		 * from old_pteval, the entry changed under us and we
		 * retry rather than lose the concurrent update.
		 */
		pteval = cmpxchg_relaxed(&pte_val(*ptep), old_pteval, pteval);
	} while (pteval != old_pteval);
}
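
/*
 * Typical call pattern (a hedged sketch): write-protecting a live
 * stage-2 entry, e.g. when enabling dirty logging for a memslot:
 *
 *	if (!kvm_s2pte_readonly(ptep))
 *		kvm_set_s2pte_readonly(ptep);
 */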

static inline bool kvm_s2pte_readonly(pte_t *ptep)
{
	return (READ_ONCE(pte_val(*ptep)) & PTE_S2_RDWR) == PTE_S2_RDONLY;
}

static inline bool kvm_s2pte_exec(pte_t *ptep)
{
	return !(READ_ONCE(pte_val(*ptep)) & PTE_S2_XN);
}

static inline void kvm_set_s2pmd_readonly(pmd_t *pmdp)
{
	kvm_set_s2pte_readonly((pte_t *)pmdp);
}

static inline bool kvm_s2pmd_readonly(pmd_t *pmdp)
{
	return kvm_s2pte_readonly((pte_t *)pmdp);
}

static inline bool kvm_s2pmd_exec(pmd_t *pmdp)
{
	return !(READ_ONCE(pmd_val(*pmdp)) & PMD_S2_XN);
}

static inline void kvm_set_s2pud_readonly(pud_t *pudp)
{
	kvm_set_s2pte_readonly((pte_t *)pudp);
}

static inline bool kvm_s2pud_readonly(pud_t *pudp)
{
	return kvm_s2pte_readonly((pte_t *)pudp);
}

static inline bool kvm_s2pud_exec(pud_t *pudp)
{
	return !(READ_ONCE(pud_val(*pudp)) & PUD_S2_XN);
}

static inline pud_t kvm_s2pud_mkyoung(pud_t pud)
{
	return pud_mkyoung(pud);
}

static inline bool kvm_s2pud_young(pud_t pud)
{
	return pud_young(pud);
}

#define hyp_pte_table_empty(ptep) kvm_page_empty(ptep)

#ifdef __PAGETABLE_PMD_FOLDED
#define hyp_pmd_table_empty(pmdp) (0)
#else
#define hyp_pmd_table_empty(pmdp) kvm_page_empty(pmdp)
#endif

#ifdef __PAGETABLE_PUD_FOLDED
#define hyp_pud_table_empty(pudp) (0)
#else
#define hyp_pud_table_empty(pudp) kvm_page_empty(pudp)
#endif

struct kvm;

#define kvm_flush_dcache_to_poc(a,l)	__flush_dcache_area((a), (l))

static inline bool vcpu_has_cache_enabled(struct kvm_vcpu *vcpu)
{
	/* Both SCTLR_EL1.M (bit 0) and SCTLR_EL1.C (bit 2) must be set */
	return (vcpu_read_sys_reg(vcpu, SCTLR_EL1) & 0b101) == 0b101;
}

static inline void __clean_dcache_guest_page(kvm_pfn_t pfn, unsigned long size)
{
	void *va = page_address(pfn_to_page(pfn));

	/*
	 * With FWB, we ensure that the guest always accesses memory using
	 * cacheable attributes, and we don't have to clean to PoC when
	 * faulting in pages. Furthermore, FWB implies IDC, so cleaning to
	 * PoU is not required either in this case.
	 */
	if (cpus_have_const_cap(ARM64_HAS_STAGE2_FWB))
		return;

	kvm_flush_dcache_to_poc(va, size);
}

static inline void __invalidate_icache_guest_page(kvm_pfn_t pfn,
						  unsigned long size)
{
	if (icache_is_aliasing()) {
		/* any kind of VIPT cache */
		__flush_icache_all();
	} else if (is_kernel_in_hyp_mode() || !icache_is_vpipt()) {
		/* PIPT or VPIPT at EL2 (see comment in __kvm_tlb_flush_vmid_ipa) */
		void *va = page_address(pfn_to_page(pfn));

		invalidate_icache_range((unsigned long)va,
					(unsigned long)va + size);
	}
}

static inline void __kvm_flush_dcache_pte(pte_t pte)
{
	if (!cpus_have_const_cap(ARM64_HAS_STAGE2_FWB)) {
		struct page *page = pte_page(pte);
		kvm_flush_dcache_to_poc(page_address(page), PAGE_SIZE);
	}
}

static inline void __kvm_flush_dcache_pmd(pmd_t pmd)
{
	if (!cpus_have_const_cap(ARM64_HAS_STAGE2_FWB)) {
		struct page *page = pmd_page(pmd);
		kvm_flush_dcache_to_poc(page_address(page), PMD_SIZE);
	}
}

static inline void __kvm_flush_dcache_pud(pud_t pud)
{
	if (!cpus_have_const_cap(ARM64_HAS_STAGE2_FWB)) {
		struct page *page = pud_page(pud);
		kvm_flush_dcache_to_poc(page_address(page), PUD_SIZE);
	}
}

#define kvm_virt_to_phys(x)		__pa_symbol(x)

void kvm_set_way_flush(struct kvm_vcpu *vcpu);
void kvm_toggle_cache(struct kvm_vcpu *vcpu, bool was_enabled);

static inline bool __kvm_cpu_uses_extended_idmap(void)
{
	return __cpu_uses_extended_idmap_level();
}

static inline unsigned long __kvm_idmap_ptrs_per_pgd(void)
{
	return idmap_ptrs_per_pgd;
}

/*
 * Can't use pgd_populate here, because the extended idmap adds an extra level
 * above CONFIG_PGTABLE_LEVELS (which is 2 or 3 if we're using the extended
 * idmap), and pgd_populate is only available if CONFIG_PGTABLE_LEVELS = 4.
 */
static inline void __kvm_extend_hypmap(pgd_t *boot_hyp_pgd,
				       pgd_t *hyp_pgd,
				       pgd_t *merged_hyp_pgd,
				       unsigned long hyp_idmap_start)
{
	int idmap_idx;
	u64 pgd_addr;

	/*
	 * Use the first entry to access the HYP mappings. It is
	 * guaranteed to be free, otherwise we wouldn't use an
	 * extended idmap.
	 */
	VM_BUG_ON(pgd_val(merged_hyp_pgd[0]));
	pgd_addr = __phys_to_pgd_val(__pa(hyp_pgd));
	merged_hyp_pgd[0] = __pgd(pgd_addr | PMD_TYPE_TABLE);

	/*
	 * Create another extended level entry that points to the boot HYP map,
	 * which contains an ID mapping of the HYP init code. We essentially
	 * merge the boot and runtime HYP maps by doing so, but they don't
	 * overlap anyway, so this is fine.
	 */
	idmap_idx = hyp_idmap_start >> VA_BITS;
	VM_BUG_ON(pgd_val(merged_hyp_pgd[idmap_idx]));
	pgd_addr = __phys_to_pgd_val(__pa(boot_hyp_pgd));
	merged_hyp_pgd[idmap_idx] = __pgd(pgd_addr | PMD_TYPE_TABLE);
}
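
/*
 * Worked example (illustrative): with VA_BITS == 39 and the HYP idmap
 * text at physical 0x80_1234_0000 (just above the 2^39 boundary),
 * idmap_idx == 0x8012340000 >> 39 == 1, i.e. the second entry of the
 * merged top-level table.
 */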

static inline unsigned int kvm_get_vmid_bits(void)
{
	int reg = read_sanitised_ftr_reg(SYS_ID_AA64MMFR1_EL1);

	/* ID_AA64MMFR1_EL1.VMIDBits == 2 advertises 16-bit VMIDs */
	return (cpuid_feature_extract_unsigned_field(reg, ID_AA64MMFR1_VMIDBITS_SHIFT) == 2) ? 16 : 8;
}

/*
 * We are not in the kvm->srcu critical section most of the time, so we take
 * the SRCU read lock here. Since we copy the data from the user page, we
 * can immediately drop the lock again.
 */
static inline int kvm_read_guest_lock(struct kvm *kvm,
				      gpa_t gpa, void *data, unsigned long len)
{
	int srcu_idx = srcu_read_lock(&kvm->srcu);
	int ret = kvm_read_guest(kvm, gpa, data, len);

	srcu_read_unlock(&kvm->srcu, srcu_idx);

	return ret;
}
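
/*
 * Hedged usage sketch: copying an 8-byte guest table entry at "gpa"
 * (hypothetical variable), in the way the vgic ITS emulation walks
 * guest memory:
 *
 *	u64 entry;
 *	int ret = kvm_read_guest_lock(kvm, gpa, &entry, sizeof(entry));
 */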

#ifdef CONFIG_KVM_INDIRECT_VECTORS
/*
 * EL2 vectors can be mapped and rerouted in a number of ways,
 * depending on the kernel configuration and CPU present:
 *
 * - If the CPU has the ARM64_HARDEN_BRANCH_PREDICTOR cap, the
 *   hardening sequence is placed in one of the vector slots, which is
 *   executed before jumping to the real vectors.
 *
 * - If the CPU has both the ARM64_HARDEN_EL2_VECTORS cap and the
 *   ARM64_HARDEN_BRANCH_PREDICTOR cap, the slot containing the
 *   hardening sequence is mapped next to the idmap page, and executed
 *   before jumping to the real vectors.
 *
 * - If the CPU only has the ARM64_HARDEN_EL2_VECTORS cap, then an
 *   empty slot is selected, mapped next to the idmap page, and
 *   executed before jumping to the real vectors.
 *
 * Note that ARM64_HARDEN_EL2_VECTORS is somewhat incompatible with
 * VHE, as we don't have hypervisor-specific mappings. If the system
 * is VHE and yet selects this capability, it will be ignored.
 */
#include <asm/mmu.h>

extern void *__kvm_bp_vect_base;
extern int __kvm_harden_el2_vector_slot;

static inline void *kvm_get_hyp_vector(void)
{
	struct bp_hardening_data *data = arm64_get_bp_hardening_data();
	void *vect = kern_hyp_va(kvm_ksym_ref(__kvm_hyp_vector));
	int slot = -1;

	if (cpus_have_const_cap(ARM64_HARDEN_BRANCH_PREDICTOR) && data->fn) {
		vect = kern_hyp_va(kvm_ksym_ref(__bp_harden_hyp_vecs_start));
		slot = data->hyp_vectors_slot;
	}

	if (this_cpu_has_cap(ARM64_HARDEN_EL2_VECTORS) && !has_vhe()) {
		vect = __kvm_bp_vect_base;
		if (slot == -1)
			slot = __kvm_harden_el2_vector_slot;
	}

	if (slot != -1)
		vect += slot * SZ_2K;

	return vect;
}
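
/*
 * Each slot is one complete 2kB ARMv8 vector table, hence the
 * "slot * SZ_2K" offset above. For example, slot 3 returns
 * vect + 0x1800.
 */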

/* This is only called on a !VHE system */
static inline int kvm_map_vectors(void)
{
	/*
	 * HBP  = ARM64_HARDEN_BRANCH_PREDICTOR
	 * HEL2 = ARM64_HARDEN_EL2_VECTORS
	 *
	 * !HBP + !HEL2 -> use direct vectors
	 *  HBP + !HEL2 -> use hardened vectors in place
	 * !HBP +  HEL2 -> allocate one vector slot and use exec mapping
	 *  HBP +  HEL2 -> use hardened vectors and use exec mapping
	 */
	if (cpus_have_const_cap(ARM64_HARDEN_BRANCH_PREDICTOR)) {
		__kvm_bp_vect_base = kvm_ksym_ref(__bp_harden_hyp_vecs_start);
		__kvm_bp_vect_base = kern_hyp_va(__kvm_bp_vect_base);
	}

	if (cpus_have_const_cap(ARM64_HARDEN_EL2_VECTORS)) {
		phys_addr_t vect_pa = __pa_symbol(__bp_harden_hyp_vecs_start);
		unsigned long size = (__bp_harden_hyp_vecs_end -
				      __bp_harden_hyp_vecs_start);

		/*
		 * Always allocate a spare vector slot, as we don't
		 * know yet which CPUs have a BP hardening slot that
		 * we can reuse.
		 */
		__kvm_harden_el2_vector_slot = atomic_inc_return(&arm64_el2_vector_last_slot);
		BUG_ON(__kvm_harden_el2_vector_slot >= BP_HARDEN_EL2_SLOTS);
		return create_hyp_exec_mappings(vect_pa, size,
						&__kvm_bp_vect_base);
	}

	return 0;
}
#else
static inline void *kvm_get_hyp_vector(void)
{
	return kern_hyp_va(kvm_ksym_ref(__kvm_hyp_vector));
}

static inline int kvm_map_vectors(void)
{
	return 0;
}
#endif

#ifdef CONFIG_ARM64_SSBD
DECLARE_PER_CPU_READ_MOSTLY(u64, arm64_ssbd_callback_required);

static inline int hyp_map_aux_data(void)
{
	int cpu, err;

	for_each_possible_cpu(cpu) {
		u64 *ptr;

		ptr = per_cpu_ptr(&arm64_ssbd_callback_required, cpu);
		err = create_hyp_mappings(ptr, ptr + 1, PAGE_HYP);
		if (err)
			return err;
	}
	return 0;
}
#else
static inline int hyp_map_aux_data(void)
{
	return 0;
}
#endif

#define kvm_phys_to_vttbr(addr)		phys_to_ttbr(addr)

/*
 * Get the magic number 'x' for VTTBR:BADDR of this KVM instance.
 * With v8.2 LVA extensions, 'x' should be a minimum of 6 with
 * 52-bit IPS.
 */
static inline int arm64_vttbr_x(u32 ipa_shift, u32 levels)
{
	int x = ARM64_VTTBR_X(ipa_shift, levels);

	return (IS_ENABLED(CONFIG_ARM64_PA_BITS_52) && x < 6) ? 6 : x;
}

static inline u64 vttbr_baddr_mask(u32 ipa_shift, u32 levels)
{
	unsigned int x = arm64_vttbr_x(ipa_shift, levels);

	return GENMASK_ULL(PHYS_MASK_SHIFT - 1, x);
}
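
/*
 * Worked example (illustrative, assuming PHYS_MASK_SHIFT == 48): if
 * arm64_vttbr_x() yields 6, the mask is GENMASK_ULL(47, 6), i.e.
 * 0x0000_ffff_ffff_ffc0, which forces a 64-byte aligned stage-2 pgd.
 */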

static inline u64 kvm_vttbr_baddr_mask(struct kvm *kvm)
{
	return vttbr_baddr_mask(kvm_phys_shift(kvm), kvm_stage2_levels(kvm));
}

static __always_inline u64 kvm_get_vttbr(struct kvm *kvm)
{
	struct kvm_vmid *vmid = &kvm->arch.vmid;
	u64 vmid_field, baddr;
	u64 cnp = system_supports_cnp() ? VTTBR_CNP_BIT : 0;

	baddr = kvm->arch.pgd_phys;
	vmid_field = (u64)vmid->vmid << VTTBR_VMID_SHIFT;
	return kvm_phys_to_vttbr(baddr) | vmid_field | cnp;
}
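
/*
 * Illustrative layout (assuming a 16-bit VMID field at
 * VTTBR_VMID_SHIFT == 48): a stage-2 pgd at physical 0x8_0042_0000
 * for VMID 5 would yield
 *
 *	VTTBR == 0x0005_0008_0042_0000 | cnp
 */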

#endif /* __ASSEMBLY__ */
#endif /* __ARM64_KVM_MMU_H__ */