/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2012,2013 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 */

#ifndef __ARM64_KVM_MMU_H__
#define __ARM64_KVM_MMU_H__

#include <asm/page.h>
#include <asm/memory.h>
#include <asm/cpufeature.h>

/*
 * As ARMv8.0 only has the TTBR0_EL2 register, we cannot express
 * "negative" addresses. This makes it impossible to directly share
 * mappings with the kernel.
 *
 * Instead, give the HYP mode its own VA region at a fixed offset from
 * the kernel by just masking the top bits (which are all ones for a
 * kernel address). We need to find out how many bits to mask.
 *
 * We want to build a set of page tables that cover both parts of the
 * idmap (the trampoline page used to initialize EL2), and our normal
 * runtime VA space, at the same time.
 *
 * Given that the kernel uses VA_BITS for its entire address space,
 * and that half of that space (VA_BITS - 1) is used for the linear
 * mapping, we can also limit the EL2 space to (VA_BITS - 1).
 *
 * The main question is "Within the VA_BITS space, does EL2 use the
 * top or the bottom half of that space to shadow the kernel's linear
 * mapping?". As we need to idmap the trampoline page, this is
 * determined by the range in which this page lives.
 *
 * If the page is in the bottom half, we have to use the top half. If
 * the page is in the top half, we have to use the bottom half:
 *
 * T = __pa_symbol(__hyp_idmap_text_start)
 * if (T & BIT(VA_BITS - 1))
 *	HYP_VA_MIN = 0  //idmap in upper half
 * else
 *	HYP_VA_MIN = 1 << (VA_BITS - 1)
 * HYP_VA_MAX = HYP_VA_MIN + (1 << (VA_BITS - 1)) - 1
 *
 * When using VHE, there are no separate hyp mappings and all KVM
 * functionality is already mapped as part of the main kernel
 * mappings, and none of this applies in that case.
 */
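
/*
 * Worked example (hypothetical numbers): with VA_BITS = 48 and an
 * idmap text page at T = 0x0004_4000_0000, bit 47 of T is clear, so
 * the idmap sits in the bottom half and HYP takes the top half:
 *
 *	HYP_VA_MIN = 1 << 47 = 0x8000_0000_0000
 *	HYP_VA_MAX = 0x8000_0000_0000 + (1 << 47) - 1 = 0xFFFF_FFFF_FFFF
 */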

#ifdef __ASSEMBLY__

#include <asm/alternative.h>

/*
 * Convert a kernel VA into a HYP VA.
 * reg: VA to be converted.
 *
 * The actual code generation takes place in kvm_update_va_mask, and
 * the instructions below are only there to reserve the space and
 * perform the register allocation (kvm_update_va_mask uses the
 * specific registers encoded in the instructions).
 */
.macro kern_hyp_va	reg
alternative_cb kvm_update_va_mask
	and     \reg, \reg, #1		/* mask with va_mask */
	ror	\reg, \reg, #1		/* rotate to the first tag bit */
	add	\reg, \reg, #0		/* insert the low 12 bits of the tag */
	add	\reg, \reg, #0, lsl 12	/* insert the top 12 bits of the tag */
	ror	\reg, \reg, #63		/* rotate back */
alternative_cb_end
.endm
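
/*
 * For illustration only (using hypothetical names matching the inline
 * comments above): once patched, the sequence computes the equivalent
 * of
 *
 *	hyp_va = ror64(ror64(va & va_mask, tag_lsb) + tag_val,
 *		       64 - tag_lsb)
 *
 * i.e. the top bits of the kernel VA are masked off and a tag value is
 * inserted, split across the two 12-bit "add" immediates. The
 * immediates shown above (#1, #0, #63) are mere placeholders that
 * kvm_update_va_mask rewrites at boot.
 */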

#else

#include <linux/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/cache.h>
#include <asm/cacheflush.h>
#include <asm/mmu_context.h>

void kvm_update_va_mask(struct alt_instr *alt,
			__le32 *origptr, __le32 *updptr, int nr_inst);
void kvm_compute_layout(void);

static __always_inline unsigned long __kern_hyp_va(unsigned long v)
{
	asm volatile(ALTERNATIVE_CB("and %0, %0, #1\n"
				    "ror %0, %0, #1\n"
				    "add %0, %0, #0\n"
				    "add %0, %0, #0, lsl 12\n"
				    "ror %0, %0, #63\n",
				    kvm_update_va_mask)
		     : "+r" (v));
	return v;
}

#define kern_hyp_va(v)	((typeof(v))(__kern_hyp_va((unsigned long)(v))))
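
/*
 * Typical use (sketch): convert a kernel pointer before dereferencing
 * it at EL2, e.g.
 *
 *	struct kvm_s2_mmu *hyp_mmu = kern_hyp_va(mmu);
 *
 * The typeof() cast keeps the converted value's pointer type.
 */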

/*
 * We currently support using a VM-specified IPA size. For backward
 * compatibility, the default IPA size is fixed to 40 bits.
 */
#define KVM_PHYS_SHIFT	(40)

#define kvm_phys_shift(kvm)		VTCR_EL2_IPA(kvm->arch.vtcr)
#define kvm_phys_size(kvm)		(_AC(1, ULL) << kvm_phys_shift(kvm))
#define kvm_phys_mask(kvm)		(kvm_phys_size(kvm) - _AC(1, ULL))
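
/*
 * Worked example: for a VM using the default 40-bit IPA space,
 * kvm_phys_size(kvm) is 1ULL << 40 (1 TiB) and kvm_phys_mask(kvm) is
 * 0xFFFFFFFFFF.
 */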
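/*
 * The stage-2 code takes a reference on a page-table page for each
 * entry it installs in it, so a refcount of 1 (the bare allocation
 * reference) means the table holds no entries.
 */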
static inline bool kvm_page_empty(void *ptr)
{
	struct page *ptr_page = virt_to_page(ptr);
	return page_count(ptr_page) == 1;
}

#include <asm/kvm_pgtable.h>
#include <asm/stage2_pgtable.h>

int create_hyp_mappings(void *from, void *to, enum kvm_pgtable_prot prot);
int create_hyp_io_mappings(phys_addr_t phys_addr, size_t size,
			   void __iomem **kaddr,
			   void __iomem **haddr);
int create_hyp_exec_mappings(phys_addr_t phys_addr, size_t size,
			     void **haddr);
void free_hyp_pgds(void);

void stage2_unmap_vm(struct kvm *kvm);
int kvm_init_stage2_mmu(struct kvm *kvm, struct kvm_s2_mmu *mmu);
void kvm_free_stage2_pgd(struct kvm_s2_mmu *mmu);
int kvm_phys_addr_ioremap(struct kvm *kvm, phys_addr_t guest_ipa,
			  phys_addr_t pa, unsigned long size, bool writable);

int kvm_handle_guest_abort(struct kvm_vcpu *vcpu);

phys_addr_t kvm_mmu_get_httbr(void);
phys_addr_t kvm_get_idmap_vector(void);
int kvm_mmu_init(void);
#define kvm_mk_pmd(ptep)					\
	__pmd(__phys_to_pmd_val(__pa(ptep)) | PMD_TYPE_TABLE)
#define kvm_mk_pud(pmdp)					\
	__pud(__phys_to_pud_val(__pa(pmdp)) | PMD_TYPE_TABLE)
#define kvm_mk_p4d(pmdp)					\
	__p4d(__phys_to_p4d_val(__pa(pmdp)) | PUD_TYPE_TABLE)

#define kvm_set_pud(pudp, pud)		set_pud(pudp, pud)

#define kvm_pfn_pte(pfn, prot)		pfn_pte(pfn, prot)
#define kvm_pfn_pmd(pfn, prot)		pfn_pmd(pfn, prot)
#define kvm_pfn_pud(pfn, prot)		pfn_pud(pfn, prot)

#define kvm_pud_pfn(pud)		pud_pfn(pud)

#define kvm_pmd_mkhuge(pmd)		pmd_mkhuge(pmd)
#define kvm_pud_mkhuge(pud)		pud_mkhuge(pud)

static inline pte_t kvm_s2pte_mkwrite(pte_t pte)
{
	pte_val(pte) |= PTE_S2_RDWR;
	return pte;
}

static inline pmd_t kvm_s2pmd_mkwrite(pmd_t pmd)
{
	pmd_val(pmd) |= PMD_S2_RDWR;
	return pmd;
}

static inline pud_t kvm_s2pud_mkwrite(pud_t pud)
{
	pud_val(pud) |= PUD_S2_RDWR;
	return pud;
}

static inline pte_t kvm_s2pte_mkexec(pte_t pte)
{
	pte_val(pte) &= ~PTE_S2_XN;
	return pte;
}

static inline pmd_t kvm_s2pmd_mkexec(pmd_t pmd)
{
	pmd_val(pmd) &= ~PMD_S2_XN;
	return pmd;
}

static inline pud_t kvm_s2pud_mkexec(pud_t pud)
{
	pud_val(pud) &= ~PUD_S2_XN;
	return pud;
}

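/*
 * The permission update below is done with a cmpxchg loop so that a
 * concurrent hardware update of the access/dirty bits (ARMv8.1 DBM)
 * racing with this software update cannot be lost.
 */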
static inline void kvm_set_s2pte_readonly(pte_t *ptep)
{
	pteval_t old_pteval, pteval;

	pteval = READ_ONCE(pte_val(*ptep));
	do {
		old_pteval = pteval;
		pteval &= ~PTE_S2_RDWR;
		pteval |= PTE_S2_RDONLY;
		pteval = cmpxchg_relaxed(&pte_val(*ptep), old_pteval, pteval);
	} while (pteval != old_pteval);
}

static inline bool kvm_s2pte_readonly(pte_t *ptep)
{
	return (READ_ONCE(pte_val(*ptep)) & PTE_S2_RDWR) == PTE_S2_RDONLY;
}

static inline bool kvm_s2pte_exec(pte_t *ptep)
{
	return !(READ_ONCE(pte_val(*ptep)) & PTE_S2_XN);
}

static inline void kvm_set_s2pmd_readonly(pmd_t *pmdp)
{
	kvm_set_s2pte_readonly((pte_t *)pmdp);
}

static inline bool kvm_s2pmd_readonly(pmd_t *pmdp)
{
	return kvm_s2pte_readonly((pte_t *)pmdp);
}

static inline bool kvm_s2pmd_exec(pmd_t *pmdp)
{
	return !(READ_ONCE(pmd_val(*pmdp)) & PMD_S2_XN);
}

static inline void kvm_set_s2pud_readonly(pud_t *pudp)
{
	kvm_set_s2pte_readonly((pte_t *)pudp);
}

static inline bool kvm_s2pud_readonly(pud_t *pudp)
{
	return kvm_s2pte_readonly((pte_t *)pudp);
}

static inline bool kvm_s2pud_exec(pud_t *pudp)
{
	return !(READ_ONCE(pud_val(*pudp)) & PUD_S2_XN);
}

static inline pud_t kvm_s2pud_mkyoung(pud_t pud)
{
	return pud_mkyoung(pud);
}

static inline bool kvm_s2pud_young(pud_t pud)
{
	return pud_young(pud);
}

struct kvm;

#define kvm_flush_dcache_to_poc(a,l)	__flush_dcache_area((a), (l))

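/*
 * 0b101 covers SCTLR_EL1.M (bit 0, MMU enable) and SCTLR_EL1.C
 * (bit 2, data cache enable): caches only count as enabled when both
 * are set.
 */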
static inline bool vcpu_has_cache_enabled(struct kvm_vcpu *vcpu)
{
	return (vcpu_read_sys_reg(vcpu, SCTLR_EL1) & 0b101) == 0b101;
}

static inline void __clean_dcache_guest_page(kvm_pfn_t pfn, unsigned long size)
{
	void *va = page_address(pfn_to_page(pfn));

	/*
	 * With FWB, we ensure that the guest always accesses memory using
	 * cacheable attributes, and we don't have to clean to PoC when
	 * faulting in pages. Furthermore, FWB implies IDC, so cleaning to
	 * PoU is not required either in this case.
	 */
	if (cpus_have_const_cap(ARM64_HAS_STAGE2_FWB))
		return;

	kvm_flush_dcache_to_poc(va, size);
}

static inline void __invalidate_icache_guest_page(kvm_pfn_t pfn,
						  unsigned long size)
{
	if (icache_is_aliasing()) {
		/* any kind of VIPT cache */
		__flush_icache_all();
	} else if (is_kernel_in_hyp_mode() || !icache_is_vpipt()) {
		/* PIPT or VPIPT at EL2 (see comment in __kvm_tlb_flush_vmid_ipa) */
		void *va = page_address(pfn_to_page(pfn));

		invalidate_icache_range((unsigned long)va,
					(unsigned long)va + size);
	}
}

static inline void __kvm_flush_dcache_pte(pte_t pte)
{
	if (!cpus_have_const_cap(ARM64_HAS_STAGE2_FWB)) {
		struct page *page = pte_page(pte);
		kvm_flush_dcache_to_poc(page_address(page), PAGE_SIZE);
	}
}

static inline void __kvm_flush_dcache_pmd(pmd_t pmd)
{
	if (!cpus_have_const_cap(ARM64_HAS_STAGE2_FWB)) {
		struct page *page = pmd_page(pmd);
		kvm_flush_dcache_to_poc(page_address(page), PMD_SIZE);
	}
}

static inline void __kvm_flush_dcache_pud(pud_t pud)
{
	if (!cpus_have_const_cap(ARM64_HAS_STAGE2_FWB)) {
		struct page *page = pud_page(pud);
		kvm_flush_dcache_to_poc(page_address(page), PUD_SIZE);
	}
}

void kvm_set_way_flush(struct kvm_vcpu *vcpu);
void kvm_toggle_cache(struct kvm_vcpu *vcpu, bool was_enabled);

static inline unsigned int kvm_get_vmid_bits(void)
{
	int reg = read_sanitised_ftr_reg(SYS_ID_AA64MMFR1_EL1);

	return get_vmid_bits(reg);
}

/*
 * We are not in the kvm->srcu critical section most of the time, so we take
 * the SRCU read lock here. Since we copy the data from the user page, we
 * can immediately drop the lock again.
 */
static inline int kvm_read_guest_lock(struct kvm *kvm,
				      gpa_t gpa, void *data, unsigned long len)
{
	int srcu_idx = srcu_read_lock(&kvm->srcu);
	int ret = kvm_read_guest(kvm, gpa, data, len);

	srcu_read_unlock(&kvm->srcu, srcu_idx);

	return ret;
}

static inline int kvm_write_guest_lock(struct kvm *kvm, gpa_t gpa,
				       const void *data, unsigned long len)
{
	int srcu_idx = srcu_read_lock(&kvm->srcu);
	int ret = kvm_write_guest(kvm, gpa, data, len);

	srcu_read_unlock(&kvm->srcu, srcu_idx);

	return ret;
}
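
/*
 * Example user (sketch, hypothetical variable names): the vgic ITS
 * emulation reads its translation tables from guest memory via these
 * wrappers, from contexts that do not already hold the SRCU read lock:
 *
 *	ret = kvm_read_guest_lock(kvm, gpa, &entry, sizeof(entry));
 */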

#ifdef CONFIG_KVM_INDIRECT_VECTORS
/*
 * EL2 vectors can be mapped and rerouted in a number of ways,
 * depending on the kernel configuration and CPU present:
 *
 * - If the CPU has the ARM64_HARDEN_BRANCH_PREDICTOR cap, the
 *   hardening sequence is placed in one of the vector slots, which is
 *   executed before jumping to the real vectors.
 *
 * - If the CPU has both the ARM64_HARDEN_EL2_VECTORS cap and the
 *   ARM64_HARDEN_BRANCH_PREDICTOR cap, the slot containing the
 *   hardening sequence is mapped next to the idmap page, and executed
 *   before jumping to the real vectors.
 *
 * - If the CPU only has the ARM64_HARDEN_EL2_VECTORS cap, then an
 *   empty slot is selected, mapped next to the idmap page, and
 *   executed before jumping to the real vectors.
 *
 * Note that ARM64_HARDEN_EL2_VECTORS is somewhat incompatible with
 * VHE, as we don't have hypervisor-specific mappings. If the system
 * is VHE and yet selects this capability, it will be ignored.
 */
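
/*
 * A vector "slot" is SZ_2K: an EL2 vector table has 16 entries of 128
 * bytes each, so slot N starts at base + N * SZ_2K, as computed in
 * kvm_get_hyp_vector() below.
 */
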
#include <asm/mmu.h>

extern void *__kvm_bp_vect_base;
extern int __kvm_harden_el2_vector_slot;

/* This is called on both VHE and !VHE systems */
static inline void *kvm_get_hyp_vector(void)
{
	struct bp_hardening_data *data = arm64_get_bp_hardening_data();
	void *vect = kern_hyp_va(kvm_ksym_ref(__kvm_hyp_vector));
	int slot = -1;

	if (cpus_have_const_cap(ARM64_HARDEN_BRANCH_PREDICTOR) && data->fn) {
		vect = kern_hyp_va(kvm_ksym_ref(__bp_harden_hyp_vecs));
		slot = data->hyp_vectors_slot;
	}

	if (this_cpu_has_cap(ARM64_HARDEN_EL2_VECTORS) && !has_vhe()) {
		vect = __kvm_bp_vect_base;
		if (slot == -1)
			slot = __kvm_harden_el2_vector_slot;
	}

	if (slot != -1)
		vect += slot * SZ_2K;

	return vect;
}

/* This is only called on a !VHE system */
static inline int kvm_map_vectors(void)
{
	/*
	 * HBP  = ARM64_HARDEN_BRANCH_PREDICTOR
	 * HEL2 = ARM64_HARDEN_EL2_VECTORS
	 *
	 * !HBP + !HEL2 -> use direct vectors
	 *  HBP + !HEL2 -> use hardened vectors in place
	 * !HBP +  HEL2 -> allocate one vector slot and use exec mapping
	 *  HBP +  HEL2 -> use hardened vectors and use exec mapping
	 */
	if (cpus_have_const_cap(ARM64_HARDEN_BRANCH_PREDICTOR)) {
		__kvm_bp_vect_base = kvm_ksym_ref(__bp_harden_hyp_vecs);
		__kvm_bp_vect_base = kern_hyp_va(__kvm_bp_vect_base);
	}

	if (cpus_have_const_cap(ARM64_HARDEN_EL2_VECTORS)) {
		phys_addr_t vect_pa = __pa_symbol(__bp_harden_hyp_vecs);
		unsigned long size = __BP_HARDEN_HYP_VECS_SZ;

		/*
		 * Always allocate a spare vector slot, as we don't
		 * know yet which CPUs have a BP hardening slot that
		 * we can reuse.
		 */
		__kvm_harden_el2_vector_slot = atomic_inc_return(&arm64_el2_vector_last_slot);
		BUG_ON(__kvm_harden_el2_vector_slot >= BP_HARDEN_EL2_SLOTS);
		return create_hyp_exec_mappings(vect_pa, size,
						&__kvm_bp_vect_base);
	}

	return 0;
}
#else
static inline void *kvm_get_hyp_vector(void)
{
	return kern_hyp_va(kvm_ksym_ref(__kvm_hyp_vector));
}

static inline int kvm_map_vectors(void)
{
	return 0;
}
#endif

#ifdef CONFIG_ARM64_SSBD
DECLARE_PER_CPU_READ_MOSTLY(u64, arm64_ssbd_callback_required);

static inline int hyp_map_aux_data(void)
{
	int cpu, err;

	for_each_possible_cpu(cpu) {
		u64 *ptr;

		ptr = per_cpu_ptr(&arm64_ssbd_callback_required, cpu);
		err = create_hyp_mappings(ptr, ptr + 1, PAGE_HYP);
		if (err)
			return err;
	}
	return 0;
}
#else
static inline int hyp_map_aux_data(void)
{
	return 0;
}
#endif

#define kvm_phys_to_vttbr(addr)		phys_to_ttbr(addr)

/*
 * Get the magic number 'x' for VTTBR:BADDR of this KVM instance.
 * With v8.2 LVA extensions, 'x' should be a minimum of 6 with
 * 52-bit IPS.
 */
static inline int arm64_vttbr_x(u32 ipa_shift, u32 levels)
{
	int x = ARM64_VTTBR_X(ipa_shift, levels);

	return (IS_ENABLED(CONFIG_ARM64_PA_BITS_52) && x < 6) ? 6 : x;
}

static inline u64 vttbr_baddr_mask(u32 ipa_shift, u32 levels)
{
	unsigned int x = arm64_vttbr_x(ipa_shift, levels);

	return GENMASK_ULL(PHYS_MASK_SHIFT - 1, x);
}
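
/*
 * Worked example (assuming 4KiB pages): a 40-bit IPA space walked with
 * 3 levels of translation gives x = 40 - 3 * (12 - 3) = 13, i.e. the
 * stage-2 pgd must be 8KiB-aligned and BADDR covers VTTBR bits [47:13].
 */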

static inline u64 kvm_vttbr_baddr_mask(struct kvm *kvm)
{
	return vttbr_baddr_mask(kvm_phys_shift(kvm), kvm_stage2_levels(kvm));
}

static __always_inline u64 kvm_get_vttbr(struct kvm_s2_mmu *mmu)
{
	struct kvm_vmid *vmid = &mmu->vmid;
	u64 vmid_field, baddr;
	u64 cnp = system_supports_cnp() ? VTTBR_CNP_BIT : 0;

	baddr = mmu->pgd_phys;
	vmid_field = (u64)vmid->vmid << VTTBR_VMID_SHIFT;
	return kvm_phys_to_vttbr(baddr) | vmid_field | cnp;
}
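
/*
 * Resulting VTTBR_EL2 layout (sketch):
 *
 *	[63:48]	VMID (8 or 16 bits wide)
 *	[47:x]	BADDR, the stage-2 pgd physical address
 *	[0]	CnP, if the system supports common-not-private
 */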

/*
 * Must be called from hyp code running at EL2 with an updated VTTBR
 * and interrupts disabled.
 */
static __always_inline void __load_guest_stage2(struct kvm_s2_mmu *mmu)
{
	write_sysreg(kern_hyp_va(mmu->kvm)->arch.vtcr, vtcr_el2);
	write_sysreg(kvm_get_vttbr(mmu), vttbr_el2);

	/*
	 * ARM errata 1165522 and 1530923 require the actual execution of the
	 * above before we can switch to the EL1/EL0 translation regime used by
	 * the guest.
	 */
	asm(ALTERNATIVE("nop", "isb", ARM64_WORKAROUND_SPECULATIVE_AT));
}

#endif /* __ASSEMBLY__ */
#endif /* __ARM64_KVM_MMU_H__ */