/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2012,2013 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 */

#ifndef __ARM64_KVM_MMU_H__
#define __ARM64_KVM_MMU_H__

#include <asm/page.h>
#include <asm/memory.h>
#include <asm/mmu.h>
#include <asm/cpufeature.h>

/*
 * As ARMv8.0 only has the TTBR0_EL2 register, we cannot express
 * "negative" addresses. This makes it impossible to directly share
 * mappings with the kernel.
 *
 * Instead, give the HYP mode its own VA region at a fixed offset from
 * the kernel by just masking the top bits (which are all ones for a
 * kernel address). We need to find out how many bits to mask.
 *
 * We want to build a set of page tables that cover both parts of the
 * idmap (the trampoline page used to initialize EL2), and our normal
 * runtime VA space, at the same time.
 *
 * Given that the kernel uses VA_BITS for its entire address space,
 * and that half of that space (VA_BITS - 1) is used for the linear
 * mapping, we can also limit the EL2 space to (VA_BITS - 1).
 *
 * The main question is "Within the VA_BITS space, does EL2 use the
 * top or the bottom half of that space to shadow the kernel's linear
 * mapping?". As we need to idmap the trampoline page, this is
 * determined by the range in which this page lives.
 *
 * If the page is in the bottom half, we have to use the top half. If
 * the page is in the top half, we have to use the bottom half:
 *
 * T = __pa_symbol(__hyp_idmap_text_start)
 * if (T & BIT(VA_BITS - 1))
 *	HYP_VA_MIN = 0  // idmap in upper half
 * else
 *	HYP_VA_MIN = 1 << (VA_BITS - 1)
 * HYP_VA_MAX = HYP_VA_MIN + (1 << (VA_BITS - 1)) - 1
 *
 * When using VHE, there are no separate hyp mappings and all KVM
 * functionality is already mapped as part of the main kernel
 * mappings, and none of this applies in that case.
 */
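
/*
 * A worked example (illustrative only, assuming VA_BITS == 48 and an
 * idmap page living in the bottom half of the physical range):
 *
 * T = 0x0000004000210000		// bit 47 (VA_BITS - 1) is clear
 * HYP_VA_MIN = 1 << 47			= 0x0000800000000000
 * HYP_VA_MAX = HYP_VA_MIN + (1 << 47) - 1
 *					= 0x0000ffffffffffff
 *
 * i.e. EL2 shadows the kernel's linear map in the top half of the
 * VA_BITS range, leaving the bottom half free to idmap the trampoline.
 */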

#ifdef __ASSEMBLY__

#include <asm/alternative.h>

/*
 * Convert a kernel VA into a HYP VA.
 * reg: VA to be converted.
 *
 * The actual code generation takes place in kvm_update_va_mask, and
 * the instructions below are only there to reserve the space and
 * perform the register allocation (kvm_update_va_mask uses the
 * specific registers encoded in the instructions).
 */
.macro kern_hyp_va	reg
alternative_cb kvm_update_va_mask
	and     \reg, \reg, #1		/* mask with va_mask */
	ror	\reg, \reg, #1		/* rotate to the first tag bit */
	add	\reg, \reg, #0		/* insert the low 12 bits of the tag */
	add	\reg, \reg, #0, lsl 12	/* insert the top 12 bits of the tag */
	ror	\reg, \reg, #63		/* rotate back */
alternative_cb_end
.endm
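
/*
 * Net effect once patched (a sketch, not the literal encoding):
 *
 *	reg = (reg & va_mask) | (tag_val << tag_lsb)
 *
 * va_mask clears the kernel's top address bits, and tag_val is the
 * tag inserted in their place. The ROR/ADD/ADD/ROR dance exists
 * because an ADD immediate is limited to 12 bits (optionally shifted
 * by 12), so inserting up to 24 tag bits takes two ADDs bracketed by
 * rotates that bring the tag field down to bit 0 and back.
 */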

/*
 * Convert a kernel image address to a PA
 * reg: kernel address to be converted in place
 * tmp: temporary register
 *
 * The actual code generation takes place in kvm_get_kimage_voffset, and
 * the instructions below are only there to reserve the space and
 * perform the register allocation (kvm_get_kimage_voffset uses the
 * specific registers encoded in the instructions).
 */
.macro kimg_pa reg, tmp
alternative_cb kvm_get_kimage_voffset
	movz	\tmp, #0
	movk	\tmp, #0, lsl #16
	movk	\tmp, #0, lsl #32
	movk	\tmp, #0, lsl #48
alternative_cb_end

	/* reg = __pa(reg) */
	sub	\reg, \reg, \tmp
.endm
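
/*
 * Once patched (a sketch of the net effect): the MOVZ/MOVK sequence
 * is rewritten to build the 64-bit kimage_voffset constant 16 bits
 * at a time, so the macro reduces to
 *
 *	reg = reg - kimage_voffset
 *
 * which is __pa() for an address inside the kernel image.
 */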

#else

#include <linux/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/cache.h>
#include <asm/cacheflush.h>
#include <asm/mmu_context.h>

void kvm_update_va_mask(struct alt_instr *alt,
			__le32 *origptr, __le32 *updptr, int nr_inst);
void kvm_compute_layout(void);

static __always_inline unsigned long __kern_hyp_va(unsigned long v)
{
	asm volatile(ALTERNATIVE_CB("and %0, %0, #1\n"
				    "ror %0, %0, #1\n"
				    "add %0, %0, #0\n"
				    "add %0, %0, #0, lsl 12\n"
				    "ror %0, %0, #63\n",
				    kvm_update_va_mask)
		     : "+r" (v));
	return v;
}

#define kern_hyp_va(v) 	((typeof(v))(__kern_hyp_va((unsigned long)(v))))

static __always_inline unsigned long __kimg_hyp_va(unsigned long v)
{
	unsigned long offset;

	asm volatile(ALTERNATIVE_CB("movz %0, #0\n"
				    "movk %0, #0, lsl #16\n"
				    "movk %0, #0, lsl #32\n"
				    "movk %0, #0, lsl #48\n",
				    kvm_update_kimg_phys_offset)
		     : "=r" (offset));

	return __kern_hyp_va((v - offset) | PAGE_OFFSET);
}

#define kimg_fn_hyp_va(v) 	((typeof(*v))(__kimg_hyp_va((unsigned long)(v))))

#define kimg_fn_ptr(x)	(typeof(x) **)(x)
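
/*
 * A minimal usage sketch (the table and handler names below are
 * hypothetical, not from this file): nVHE hyp code at EL2 cannot
 * dereference kernel image addresses directly, so function pointers
 * are stored via kimg_fn_ptr() and converted with kimg_fn_hyp_va()
 * at the call site, going image address -> PA -> linear map VA ->
 * hyp VA:
 *
 *	typedef void (*handler_fn_t)(void);
 *	static const handler_fn_t *handlers[] = {
 *		[0] = kimg_fn_ptr(my_handler),
 *	};
 *
 *	handler_fn_t fn = kimg_fn_hyp_va(handlers[id]);
 *	fn();
 */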

/*
 * We currently support using a VM-specified IPA size. For backward
 * compatibility, the default IPA size is fixed to 40 bits.
 */
#define KVM_PHYS_SHIFT	(40)

#define kvm_phys_shift(kvm)		VTCR_EL2_IPA(kvm->arch.vtcr)
#define kvm_phys_size(kvm)		(_AC(1, ULL) << kvm_phys_shift(kvm))
#define kvm_phys_mask(kvm)		(kvm_phys_size(kvm) - _AC(1, ULL))
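
/*
 * For example, a VM using the default 40-bit IPA size gives:
 *
 *	kvm_phys_shift(kvm) == 40
 *	kvm_phys_size(kvm)  == 1ULL << 40	(1TB of IPA space)
 *	kvm_phys_mask(kvm)  == 0xffffffffff
 */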

#include <asm/kvm_pgtable.h>
#include <asm/stage2_pgtable.h>

int create_hyp_mappings(void *from, void *to, enum kvm_pgtable_prot prot);
int create_hyp_io_mappings(phys_addr_t phys_addr, size_t size,
			   void __iomem **kaddr,
			   void __iomem **haddr);
int create_hyp_exec_mappings(phys_addr_t phys_addr, size_t size,
			     void **haddr);
void free_hyp_pgds(void);

void stage2_unmap_vm(struct kvm *kvm);
int kvm_init_stage2_mmu(struct kvm *kvm, struct kvm_s2_mmu *mmu);
void kvm_free_stage2_pgd(struct kvm_s2_mmu *mmu);
int kvm_phys_addr_ioremap(struct kvm *kvm, phys_addr_t guest_ipa,
			  phys_addr_t pa, unsigned long size, bool writable);

int kvm_handle_guest_abort(struct kvm_vcpu *vcpu);

phys_addr_t kvm_mmu_get_httbr(void);
phys_addr_t kvm_get_idmap_vector(void);
int kvm_mmu_init(void);

struct kvm;

#define kvm_flush_dcache_to_poc(a,l)	__flush_dcache_area((a), (l))

static inline bool vcpu_has_cache_enabled(struct kvm_vcpu *vcpu)
{
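	/* SCTLR_EL1.M (bit 0, MMU enable) and SCTLR_EL1.C (bit 2, D-cache enable) */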
	return (vcpu_read_sys_reg(vcpu, SCTLR_EL1) & 0b101) == 0b101;
}

static inline void __clean_dcache_guest_page(kvm_pfn_t pfn, unsigned long size)
{
	void *va = page_address(pfn_to_page(pfn));

	/*
	 * With FWB, we ensure that the guest always accesses memory using
	 * cacheable attributes, and we don't have to clean to PoC when
	 * faulting in pages. Furthermore, FWB implies IDC, so cleaning to
	 * PoU is not required either in this case.
	 */
	if (cpus_have_const_cap(ARM64_HAS_STAGE2_FWB))
		return;

	kvm_flush_dcache_to_poc(va, size);
}

static inline void __invalidate_icache_guest_page(kvm_pfn_t pfn,
						  unsigned long size)
{
	if (icache_is_aliasing()) {
		/* any kind of VIPT cache */
		__flush_icache_all();
	} else if (is_kernel_in_hyp_mode() || !icache_is_vpipt()) {
		/* PIPT or VPIPT at EL2 (see comment in __kvm_tlb_flush_vmid_ipa) */
		void *va = page_address(pfn_to_page(pfn));

		invalidate_icache_range((unsigned long)va,
					(unsigned long)va + size);
	}
}

void kvm_set_way_flush(struct kvm_vcpu *vcpu);
void kvm_toggle_cache(struct kvm_vcpu *vcpu, bool was_enabled);

static inline unsigned int kvm_get_vmid_bits(void)
{
	int reg = read_sanitised_ftr_reg(SYS_ID_AA64MMFR1_EL1);

	return get_vmid_bits(reg);
}

/*
 * We are not in the kvm->srcu critical section most of the time, so we take
 * the SRCU read lock here. Since we copy the data from the user page, we
 * can immediately drop the lock again.
 */
static inline int kvm_read_guest_lock(struct kvm *kvm,
				      gpa_t gpa, void *data, unsigned long len)
{
	int srcu_idx = srcu_read_lock(&kvm->srcu);
	int ret = kvm_read_guest(kvm, gpa, data, len);

	srcu_read_unlock(&kvm->srcu, srcu_idx);

	return ret;
}

static inline int kvm_write_guest_lock(struct kvm *kvm, gpa_t gpa,
				       const void *data, unsigned long len)
{
	int srcu_idx = srcu_read_lock(&kvm->srcu);
	int ret = kvm_write_guest(kvm, gpa, data, len);

	srcu_read_unlock(&kvm->srcu, srcu_idx);

	return ret;
}

/*
 * EL2 vectors can be mapped and rerouted in a number of ways,
 * depending on the kernel configuration and CPU present:
 *
 * - If the CPU is affected by Spectre-v2, the hardening sequence is
 *   placed in one of the vector slots, which is executed before jumping
 *   to the real vectors.
 *
 * - If the CPU also has the ARM64_HARDEN_EL2_VECTORS cap, the slot
 *   containing the hardening sequence is mapped next to the idmap page,
 *   and executed before jumping to the real vectors.
 *
 * - If the CPU only has the ARM64_HARDEN_EL2_VECTORS cap, then an
 *   empty slot is selected, mapped next to the idmap page, and
 *   executed before jumping to the real vectors.
 *
 * Note that ARM64_HARDEN_EL2_VECTORS is somewhat incompatible with
 * VHE, as we don't have hypervisor-specific mappings. If the system
 * is VHE and yet selects this capability, it will be ignored.
 */
extern void *__kvm_bp_vect_base;
extern int __kvm_harden_el2_vector_slot;

static inline void *kvm_get_hyp_vector(void)
{
	struct bp_hardening_data *data = arm64_get_bp_hardening_data();
	void *vect = kern_hyp_va(kvm_ksym_ref(__kvm_hyp_vector));
	int slot = -1;

	if (cpus_have_const_cap(ARM64_SPECTRE_V2) && data->fn) {
		vect = kern_hyp_va(kvm_ksym_ref(__bp_harden_hyp_vecs));
		slot = data->hyp_vectors_slot;
	}

	if (this_cpu_has_cap(ARM64_HARDEN_EL2_VECTORS) && !has_vhe()) {
		vect = __kvm_bp_vect_base;
		if (slot == -1)
			slot = __kvm_harden_el2_vector_slot;
	}

	if (slot != -1)
		vect += slot * SZ_2K;

	return vect;
}

#define kvm_phys_to_vttbr(addr)		phys_to_ttbr(addr)

static __always_inline u64 kvm_get_vttbr(struct kvm_s2_mmu *mmu)
{
	struct kvm_vmid *vmid = &mmu->vmid;
	u64 vmid_field, baddr;
	u64 cnp = system_supports_cnp() ? VTTBR_CNP_BIT : 0;

	baddr = mmu->pgd_phys;
	vmid_field = (u64)vmid->vmid << VTTBR_VMID_SHIFT;
	return kvm_phys_to_vttbr(baddr) | vmid_field | cnp;
}
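
/*
 * The packing above follows the VTTBR_EL2 layout (a sketch, assuming
 * 16-bit VMIDs):
 *
 *	bits [63:48]	VMID
 *	bits [47:1]	stage-2 page table base address (BADDR)
 *	bit  [0]	CnP (Common not Private), if supported
 */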

/*
 * Must be called from hyp code running at EL2 with an updated VTTBR
 * and interrupts disabled.
 */
static __always_inline void __load_guest_stage2(struct kvm_s2_mmu *mmu)
{
	write_sysreg(kern_hyp_va(mmu->kvm)->arch.vtcr, vtcr_el2);
	write_sysreg(kvm_get_vttbr(mmu), vttbr_el2);

	/*
	 * ARM errata 1165522 and 1530923 require the actual execution of the
	 * above before we can switch to the EL1/EL0 translation regime used by
	 * the guest.
	 */
	asm(ALTERNATIVE("nop", "isb", ARM64_WORKAROUND_SPECULATIVE_AT));
}

#endif /* __ASSEMBLY__ */
#endif /* __ARM64_KVM_MMU_H__ */