// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2020 Google LLC
 * Author: Will Deacon <will@kernel.org>
 */

#ifndef __ARM64_KVM_PGTABLE_H__
#define __ARM64_KVM_PGTABLE_H__

#include <linux/bits.h>
#include <linux/kvm_host.h>
#include <linux/types.h>

#define KVM_PGTABLE_MAX_LEVELS		4U

/*
 * The largest supported block sizes for KVM (no 52-bit PA support):
 *  - 4K (level 1):	1GB
 *  - 16K (level 2):	32MB
 *  - 64K (level 2):	512MB
 */
#ifdef CONFIG_ARM64_4K_PAGES
#define KVM_PGTABLE_MIN_BLOCK_LEVEL	1U
#else
#define KVM_PGTABLE_MIN_BLOCK_LEVEL	2U
#endif

static inline u64 kvm_get_parange(u64 mmfr0)
{
	u64 parange = cpuid_feature_extract_unsigned_field(mmfr0,
				ID_AA64MMFR0_EL1_PARANGE_SHIFT);
	if (parange > ID_AA64MMFR0_EL1_PARANGE_MAX)
		parange = ID_AA64MMFR0_EL1_PARANGE_MAX;

	return parange;
}

typedef u64 kvm_pte_t;

#define KVM_PTE_VALID			BIT(0)

#define KVM_PTE_ADDR_MASK		GENMASK(47, PAGE_SHIFT)
#define KVM_PTE_ADDR_51_48		GENMASK(15, 12)

#define KVM_PHYS_INVALID		(-1ULL)

static inline bool kvm_pte_valid(kvm_pte_t pte)
{
	return pte & KVM_PTE_VALID;
}

static inline u64 kvm_pte_to_phys(kvm_pte_t pte)
{
	u64 pa = pte & KVM_PTE_ADDR_MASK;

	if (PAGE_SHIFT == 16)
		pa |= FIELD_GET(KVM_PTE_ADDR_51_48, pte) << 48;

	return pa;
}

static inline kvm_pte_t kvm_phys_to_pte(u64 pa)
{
	kvm_pte_t pte = pa & KVM_PTE_ADDR_MASK;

	if (PAGE_SHIFT == 16) {
		pa &= GENMASK(51, 48);
		pte |= FIELD_PREP(KVM_PTE_ADDR_51_48, pa >> 48);
	}

	return pte;
}
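
/*
 * Worked example (editor's illustration): with 64K pages (PAGE_SHIFT == 16),
 * a 52-bit PA such as 0x000f876543210000 is encoded by kvm_phys_to_pte() as
 * follows: bits [47:16] are kept in place by KVM_PTE_ADDR_MASK, and bits
 * [51:48] (0xf here) are packed into pte[15:12] via KVM_PTE_ADDR_51_48.
 * kvm_pte_to_phys() reverses this, so the round trip below is an identity
 * for any granule-aligned PA within the supported range:
 *
 *	u64 pa = 0xf876543210000ULL;
 *
 *	WARN_ON(kvm_pte_to_phys(kvm_phys_to_pte(pa)) != pa);
 */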

static inline u64 kvm_granule_shift(u32 level)
{
	/* Assumes KVM_PGTABLE_MAX_LEVELS is 4 */
	return ARM64_HW_PGTABLE_LEVEL_SHIFT(level);
}

static inline u64 kvm_granule_size(u32 level)
{
	return BIT(kvm_granule_shift(level));
}

static inline bool kvm_level_supports_block_mapping(u32 level)
{
	return level >= KVM_PGTABLE_MIN_BLOCK_LEVEL;
}
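
/*
 * Worked example (editor's illustration): ARM64_HW_PGTABLE_LEVEL_SHIFT(l)
 * evaluates to ((PAGE_SHIFT - 3) * (4 - l)) + 3, so with 4K pages
 * (PAGE_SHIFT == 12):
 *
 *	kvm_granule_shift(1) == 30	->	kvm_granule_size(1) == 1GB
 *	kvm_granule_shift(2) == 21	->	kvm_granule_size(2) == 2MB
 *	kvm_granule_shift(3) == 12	->	kvm_granule_size(3) == 4KB
 *
 * which matches the block-size table above: with 4K pages, level 1 is the
 * shallowest level allowed to hold a block mapping
 * (KVM_PGTABLE_MIN_BLOCK_LEVEL == 1).
 */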

/**
 * struct kvm_pgtable_mm_ops - Memory management callbacks.
 * @zalloc_page:		Allocate a single zeroed memory page.
 *				The @arg parameter can be used by the walker
 *				to pass a memcache. The initial refcount of
 *				the page is 1.
 * @zalloc_pages_exact:		Allocate an exact number of zeroed memory pages.
 *				The @size parameter is in bytes, and is rounded
 *				up to the next page boundary. The resulting
 *				allocation is physically contiguous.
 * @free_pages_exact:		Free an exact number of memory pages previously
 *				allocated by zalloc_pages_exact.
 * @free_removed_table:		Free a removed paging structure by unlinking and
 *				dropping references.
 * @get_page:			Increment the refcount on a page.
 * @put_page:			Decrement the refcount on a page. When the
 *				refcount reaches 0 the page is automatically
 *				freed.
 * @page_count:			Return the refcount of a page.
 * @phys_to_virt:		Convert a physical address into a virtual
 *				address mapped in the current context.
 * @virt_to_phys:		Convert a virtual address mapped in the current
 *				context into a physical address.
 * @dcache_clean_inval_poc:	Clean and invalidate the data cache to the PoC
 *				for the specified memory address range.
 * @icache_inval_pou:		Invalidate the instruction cache to the PoU
 *				for the specified memory address range.
 */
struct kvm_pgtable_mm_ops {
	void*		(*zalloc_page)(void *arg);
	void*		(*zalloc_pages_exact)(size_t size);
	void		(*free_pages_exact)(void *addr, size_t size);
	void		(*free_removed_table)(void *addr, u32 level);
	void		(*get_page)(void *addr);
	void		(*put_page)(void *addr);
	int		(*page_count)(void *addr);
	void*		(*phys_to_virt)(phys_addr_t phys);
	phys_addr_t	(*virt_to_phys)(void *addr);
	void		(*dcache_clean_inval_poc)(void *addr, size_t size);
	void		(*icache_inval_pou)(void *addr, size_t size);
};
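
/*
 * Minimal sketch (editor's illustration, not the in-tree implementation) of
 * a kernel-context struct kvm_pgtable_mm_ops. The names below are made up;
 * only the standard kernel allocator and virt/phys helpers are assumed:
 *
 *	static void *example_zalloc_page(void *arg)
 *	{
 *		return (void *)get_zeroed_page(GFP_KERNEL);
 *	}
 *
 *	static void *example_phys_to_virt(phys_addr_t phys)
 *	{
 *		return __va(phys);
 *	}
 *
 *	static phys_addr_t example_virt_to_phys(void *addr)
 *	{
 *		return __pa(addr);
 *	}
 *
 *	static struct kvm_pgtable_mm_ops example_mm_ops = {
 *		.zalloc_page	= example_zalloc_page,
 *		.phys_to_virt	= example_phys_to_virt,
 *		.virt_to_phys	= example_virt_to_phys,
 *	};
 *
 * A real user must also wire up the refcounting and cache-maintenance
 * callbacks required by the table walkers it intends to run.
 */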

/**
 * enum kvm_pgtable_stage2_flags - Stage-2 page-table flags.
 * @KVM_PGTABLE_S2_NOFWB:	Don't enforce Normal-WB even if the CPUs have
 *				ARM64_HAS_STAGE2_FWB.
 * @KVM_PGTABLE_S2_IDMAP:	Only use identity mappings.
 */
enum kvm_pgtable_stage2_flags {
	KVM_PGTABLE_S2_NOFWB			= BIT(0),
	KVM_PGTABLE_S2_IDMAP			= BIT(1),
};

/**
 * enum kvm_pgtable_prot - Page-table permissions and attributes.
 * @KVM_PGTABLE_PROT_X:		Execute permission.
 * @KVM_PGTABLE_PROT_W:		Write permission.
 * @KVM_PGTABLE_PROT_R:		Read permission.
 * @KVM_PGTABLE_PROT_DEVICE:	Device attributes.
 * @KVM_PGTABLE_PROT_SW0:	Software bit 0.
 * @KVM_PGTABLE_PROT_SW1:	Software bit 1.
 * @KVM_PGTABLE_PROT_SW2:	Software bit 2.
 * @KVM_PGTABLE_PROT_SW3:	Software bit 3.
 */
enum kvm_pgtable_prot {
	KVM_PGTABLE_PROT_X			= BIT(0),
	KVM_PGTABLE_PROT_W			= BIT(1),
	KVM_PGTABLE_PROT_R			= BIT(2),

	KVM_PGTABLE_PROT_DEVICE			= BIT(3),

	KVM_PGTABLE_PROT_SW0			= BIT(55),
	KVM_PGTABLE_PROT_SW1			= BIT(56),
	KVM_PGTABLE_PROT_SW2			= BIT(57),
	KVM_PGTABLE_PROT_SW3			= BIT(58),
};

#define KVM_PGTABLE_PROT_RW	(KVM_PGTABLE_PROT_R | KVM_PGTABLE_PROT_W)
#define KVM_PGTABLE_PROT_RWX	(KVM_PGTABLE_PROT_RW | KVM_PGTABLE_PROT_X)

#define PKVM_HOST_MEM_PROT	KVM_PGTABLE_PROT_RWX
#define PKVM_HOST_MMIO_PROT	KVM_PGTABLE_PROT_RW

#define PAGE_HYP		KVM_PGTABLE_PROT_RW
#define PAGE_HYP_EXEC		(KVM_PGTABLE_PROT_R | KVM_PGTABLE_PROT_X)
#define PAGE_HYP_RO		(KVM_PGTABLE_PROT_R)
#define PAGE_HYP_DEVICE		(PAGE_HYP | KVM_PGTABLE_PROT_DEVICE)

typedef bool (*kvm_pgtable_force_pte_cb_t)(u64 addr, u64 end,
					   enum kvm_pgtable_prot prot);

/**
 * enum kvm_pgtable_walk_flags - Flags to control a depth-first page-table walk.
 * @KVM_PGTABLE_WALK_LEAF:		Visit leaf entries, including invalid
 *					entries.
 * @KVM_PGTABLE_WALK_TABLE_PRE:		Visit table entries before their
 *					children.
 * @KVM_PGTABLE_WALK_TABLE_POST:	Visit table entries after their
 *					children.
 * @KVM_PGTABLE_WALK_SHARED:		Indicates the page-tables may be shared
 *					with other software walkers.
 */
enum kvm_pgtable_walk_flags {
	KVM_PGTABLE_WALK_LEAF			= BIT(0),
	KVM_PGTABLE_WALK_TABLE_PRE		= BIT(1),
	KVM_PGTABLE_WALK_TABLE_POST		= BIT(2),
	KVM_PGTABLE_WALK_SHARED			= BIT(3),
};

struct kvm_pgtable_visit_ctx {
	kvm_pte_t				*ptep;
	kvm_pte_t				old;
	void					*arg;
	struct kvm_pgtable_mm_ops		*mm_ops;
	u64					addr;
	u64					end;
	u32					level;
	enum kvm_pgtable_walk_flags		flags;
};

typedef int (*kvm_pgtable_visitor_fn_t)(const struct kvm_pgtable_visit_ctx *ctx,
					enum kvm_pgtable_walk_flags visit);

static inline bool kvm_pgtable_walk_shared(const struct kvm_pgtable_visit_ctx *ctx)
{
	return ctx->flags & KVM_PGTABLE_WALK_SHARED;
}

/**
 * struct kvm_pgtable_walker - Hook into a page-table walk.
 * @cb:		Callback function to invoke during the walk.
 * @arg:	Argument passed to the callback function.
 * @flags:	Bitwise-OR of flags to identify the entry types on which to
 *		invoke the callback function.
 */
struct kvm_pgtable_walker {
	const kvm_pgtable_visitor_fn_t		cb;
	void * const				arg;
	const enum kvm_pgtable_walk_flags	flags;
};
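
/*
 * Example (editor's sketch): a walker that counts valid leaf entries. The
 * callback and counter below are illustrative only:
 *
 *	static int count_valid_cb(const struct kvm_pgtable_visit_ctx *ctx,
 *				  enum kvm_pgtable_walk_flags visit)
 *	{
 *		u64 *count = ctx->arg;
 *
 *		if (kvm_pte_valid(ctx->old))
 *			(*count)++;
 *
 *		return 0;
 *	}
 *
 *	u64 count = 0;
 *	struct kvm_pgtable_walker walker = {
 *		.cb	= count_valid_cb,
 *		.arg	= &count,
 *		.flags	= KVM_PGTABLE_WALK_LEAF,
 *	};
 *
 * Passing this to kvm_pgtable_walk() (declared below) visits every leaf
 * entry in the requested range, including invalid ones, which is why the
 * callback checks kvm_pte_valid() explicitly.
 */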

/*
 * RCU cannot be used in a non-kernel context such as the hyp. As such, page
 * table walkers used in hyp do not call into RCU and instead use other
 * synchronization mechanisms (such as a spinlock).
 */
#if defined(__KVM_NVHE_HYPERVISOR__) || defined(__KVM_VHE_HYPERVISOR__)

typedef kvm_pte_t *kvm_pteref_t;

static inline kvm_pte_t *kvm_dereference_pteref(struct kvm_pgtable_walker *walker,
						kvm_pteref_t pteref)
{
	return pteref;
}

static inline int kvm_pgtable_walk_begin(struct kvm_pgtable_walker *walker)
{
	/*
	 * Due to the lack of RCU (or a similar protection scheme), only
	 * non-shared table walkers are allowed in the hypervisor.
	 */
	if (walker->flags & KVM_PGTABLE_WALK_SHARED)
		return -EPERM;

	return 0;
}

static inline void kvm_pgtable_walk_end(struct kvm_pgtable_walker *walker) {}

static inline bool kvm_pgtable_walk_lock_held(void)
{
	return true;
}

#else

typedef kvm_pte_t __rcu *kvm_pteref_t;

static inline kvm_pte_t *kvm_dereference_pteref(struct kvm_pgtable_walker *walker,
						kvm_pteref_t pteref)
{
	return rcu_dereference_check(pteref, !(walker->flags & KVM_PGTABLE_WALK_SHARED));
}

static inline int kvm_pgtable_walk_begin(struct kvm_pgtable_walker *walker)
{
	if (walker->flags & KVM_PGTABLE_WALK_SHARED)
		rcu_read_lock();

	return 0;
}

static inline void kvm_pgtable_walk_end(struct kvm_pgtable_walker *walker)
{
	if (walker->flags & KVM_PGTABLE_WALK_SHARED)
		rcu_read_unlock();
}

static inline bool kvm_pgtable_walk_lock_held(void)
{
	return rcu_read_lock_held();
}

#endif
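
/*
 * Illustration (editor's sketch): the generic walker code is expected to
 * bracket every walk with the helpers above, so that shared walks are
 * RCU-protected in the kernel and rejected with -EPERM at hyp:
 *
 *	int r = kvm_pgtable_walk_begin(walker);
 *
 *	if (r)
 *		return r;
 *	r = do_the_walk(pgt, addr, size, walker);	// hypothetical core
 *	kvm_pgtable_walk_end(walker);
 */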

/**
 * struct kvm_pgtable - KVM page-table.
 * @ia_bits:		Maximum input address size, in bits.
 * @start_level:	Level at which the page-table walk starts.
 * @pgd:		Pointer to the first top-level entry of the page-table.
 * @mm_ops:		Memory management callbacks.
 * @mmu:		Stage-2 KVM MMU struct. Unused for stage-1 page-tables.
 * @flags:		Stage-2 page-table flags.
 * @force_pte_cb:	Function that returns true if page level mappings must
 *			be used instead of block mappings.
 */
struct kvm_pgtable {
	u32					ia_bits;
	u32					start_level;
	kvm_pteref_t				pgd;
	struct kvm_pgtable_mm_ops		*mm_ops;

	/* Stage-2 only */
	struct kvm_s2_mmu			*mmu;
	enum kvm_pgtable_stage2_flags		flags;
	kvm_pgtable_force_pte_cb_t		force_pte_cb;
};

/**
 * kvm_pgtable_hyp_init() - Initialise a hypervisor stage-1 page-table.
 * @pgt:	Uninitialised page-table structure to initialise.
 * @va_bits:	Maximum virtual address bits.
 * @mm_ops:	Memory management callbacks.
 *
 * Return: 0 on success, negative error code on failure.
 */
int kvm_pgtable_hyp_init(struct kvm_pgtable *pgt, u32 va_bits,
			 struct kvm_pgtable_mm_ops *mm_ops);

/**
 * kvm_pgtable_hyp_destroy() - Destroy an unused hypervisor stage-1 page-table.
 * @pgt:	Page-table structure initialised by kvm_pgtable_hyp_init().
 *
 * The page-table is assumed to be unreachable by any hardware walkers prior
 * to freeing and therefore no TLB invalidation is performed.
 */
void kvm_pgtable_hyp_destroy(struct kvm_pgtable *pgt);

/**
 * kvm_pgtable_hyp_map() - Install a mapping in a hypervisor stage-1 page-table.
 * @pgt:	Page-table structure initialised by kvm_pgtable_hyp_init().
 * @addr:	Virtual address at which to place the mapping.
 * @size:	Size of the mapping.
 * @phys:	Physical address of the memory to map.
 * @prot:	Permissions and attributes for the mapping.
 *
 * The offset of @addr within a page is ignored, @size is rounded-up to
 * the next page boundary and @phys is rounded-down to the previous page
 * boundary.
 *
 * If device attributes are not explicitly requested in @prot, then the
 * mapping will be normal, cacheable. Attempts to install a new mapping
 * for a virtual address that is already mapped will be rejected with an
 * error and a WARN().
 *
 * Return: 0 on success, negative error code on failure.
 */
int kvm_pgtable_hyp_map(struct kvm_pgtable *pgt, u64 addr, u64 size, u64 phys,
			enum kvm_pgtable_prot prot);
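
/*
 * Example (editor's sketch): mapping one read-only page at an arbitrary
 * hyp VA. The 'pgt', 'hyp_va' and 'phys' names are illustrative only:
 *
 *	int ret = kvm_pgtable_hyp_map(&pgt, hyp_va, PAGE_SIZE, phys,
 *				      PAGE_HYP_RO);
 *	if (ret)
 *		return ret;	// fails if hyp_va is already mapped
 */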

/**
 * kvm_pgtable_hyp_unmap() - Remove a mapping from a hypervisor stage-1 page-table.
 * @pgt:	Page-table structure initialised by kvm_pgtable_hyp_init().
 * @addr:	Virtual address from which to remove the mapping.
 * @size:	Size of the mapping.
 *
 * The offset of @addr within a page is ignored and @size is rounded-up to
 * the next page boundary.
 *
 * TLB invalidation is performed for each page-table entry cleared during the
 * unmapping operation and the reference count for the page-table page
 * containing the cleared entry is decremented, with unreferenced pages being
 * freed. The unmapping operation will stop early if it encounters either an
 * invalid page-table entry or a valid block mapping which maps beyond the range
 * being unmapped.
 *
 * Return: Number of bytes unmapped, which may be 0.
 */
u64 kvm_pgtable_hyp_unmap(struct kvm_pgtable *pgt, u64 addr, u64 size);

/**
 * kvm_get_vtcr() - Helper to construct VTCR_EL2
 * @mmfr0:	Sanitized value of SYS_ID_AA64MMFR0_EL1 register.
 * @mmfr1:	Sanitized value of SYS_ID_AA64MMFR1_EL1 register.
 * @phys_shift:	Value to set in VTCR_EL2.T0SZ.
 *
 * The VTCR value is common across all the physical CPUs on the system.
 * We use system wide sanitised values to fill in different fields,
 * except for Hardware Management of Access Flags. HA Flag is set
 * unconditionally on all CPUs, as it is safe to run with or without
 * the feature and the bit is RES0 on CPUs that don't support it.
 *
 * Return: VTCR_EL2 value
 */
u64 kvm_get_vtcr(u64 mmfr0, u64 mmfr1, u32 phys_shift);

/**
 * kvm_pgtable_stage2_pgd_size() - Helper to compute size of a stage-2 PGD
 * @vtcr:	Content of the VTCR register.
 *
 * Return: the size (in bytes) of the stage-2 PGD
 */
size_t kvm_pgtable_stage2_pgd_size(u64 vtcr);

/**
 * __kvm_pgtable_stage2_init() - Initialise a guest stage-2 page-table.
 * @pgt:	Uninitialised page-table structure to initialise.
 * @mmu:	S2 MMU context for this S2 translation
 * @mm_ops:	Memory management callbacks.
 * @flags:	Stage-2 configuration flags.
 * @force_pte_cb: Function that returns true if page level mappings must
 *		be used instead of block mappings.
 *
 * Return: 0 on success, negative error code on failure.
 */
int __kvm_pgtable_stage2_init(struct kvm_pgtable *pgt, struct kvm_s2_mmu *mmu,
			      struct kvm_pgtable_mm_ops *mm_ops,
			      enum kvm_pgtable_stage2_flags flags,
			      kvm_pgtable_force_pte_cb_t force_pte_cb);

#define kvm_pgtable_stage2_init(pgt, mmu, mm_ops) \
	__kvm_pgtable_stage2_init(pgt, mmu, mm_ops, 0, NULL)

/**
 * kvm_pgtable_stage2_destroy() - Destroy an unused guest stage-2 page-table.
 * @pgt:	Page-table structure initialised by kvm_pgtable_stage2_init*().
 *
 * The page-table is assumed to be unreachable by any hardware walkers prior
 * to freeing and therefore no TLB invalidation is performed.
 */
void kvm_pgtable_stage2_destroy(struct kvm_pgtable *pgt);

/**
 * kvm_pgtable_stage2_free_removed() - Free a removed stage-2 paging structure.
 * @mm_ops:	Memory management callbacks.
 * @pgtable:	Unlinked stage-2 paging structure to be freed.
 * @level:	Level of the stage-2 paging structure to be freed.
 *
 * The page-table is assumed to be unreachable by any hardware walkers prior to
 * freeing and therefore no TLB invalidation is performed.
 */
void kvm_pgtable_stage2_free_removed(struct kvm_pgtable_mm_ops *mm_ops, void *pgtable, u32 level);

/**
 * kvm_pgtable_stage2_map() - Install a mapping in a guest stage-2 page-table.
 * @pgt:	Page-table structure initialised by kvm_pgtable_stage2_init*().
 * @addr:	Intermediate physical address at which to place the mapping.
 * @size:	Size of the mapping.
 * @phys:	Physical address of the memory to map.
 * @prot:	Permissions and attributes for the mapping.
 * @mc:		Cache of pre-allocated and zeroed memory from which to allocate
 *		page-table pages.
 * @flags:	Flags to control the page-table walk (e.g. a shared walk).
 *
 * The offset of @addr within a page is ignored, @size is rounded-up to
 * the next page boundary and @phys is rounded-down to the previous page
 * boundary.
 *
 * If device attributes are not explicitly requested in @prot, then the
 * mapping will be normal, cacheable.
 *
 * Note that the update of a valid leaf PTE in this function will be aborted
 * if it attempts to recreate the exact same mapping or only to change the
 * access permissions. Instead, the vCPU will exit one more time from the
 * guest if still needed and then go through the path of relaxing permissions.
 *
 * Note that this function will both coalesce existing table entries and split
 * existing block mappings, relying on page-faults to fault back areas outside
 * of the new mapping lazily.
 *
 * Return: 0 on success, negative error code on failure.
 */
int kvm_pgtable_stage2_map(struct kvm_pgtable *pgt, u64 addr, u64 size,
			   u64 phys, enum kvm_pgtable_prot prot,
			   void *mc, enum kvm_pgtable_walk_flags flags);
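
/*
 * Example (editor's sketch): installing a writable page-sized mapping at
 * IPA 'ipa' backed by host page frame 'pfn'. The 'memcache' and other
 * variable names are illustrative only:
 *
 *	ret = kvm_pgtable_stage2_map(pgt, ipa, PAGE_SIZE,
 *				     pfn << PAGE_SHIFT,
 *				     KVM_PGTABLE_PROT_RW,
 *				     memcache, KVM_PGTABLE_WALK_SHARED);
 *
 * With KVM_PGTABLE_WALK_SHARED the walk may race with other software
 * walkers; without it, the caller must guarantee exclusive access to the
 * page-tables.
 */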

/**
 * kvm_pgtable_stage2_set_owner() - Unmap and annotate pages in the IPA space to
 *				    track ownership.
 * @pgt:	Page-table structure initialised by kvm_pgtable_stage2_init*().
 * @addr:	Base intermediate physical address to annotate.
 * @size:	Size of the annotated range.
 * @mc:		Cache of pre-allocated and zeroed memory from which to allocate
 *		page-table pages.
 * @owner_id:	Unique identifier for the owner of the page.
 *
 * By default, all page-tables are owned by identifier 0. This function can be
 * used to mark portions of the IPA space as owned by other entities. When a
 * stage 2 is used with identity-mappings, these annotations allow the
 * page-table data structure to be used as a simple rmap.
 *
 * Return: 0 on success, negative error code on failure.
 */
int kvm_pgtable_stage2_set_owner(struct kvm_pgtable *pgt, u64 addr, u64 size,
				 void *mc, u8 owner_id);

/**
 * kvm_pgtable_stage2_unmap() - Remove a mapping from a guest stage-2 page-table.
 * @pgt:	Page-table structure initialised by kvm_pgtable_stage2_init*().
 * @addr:	Intermediate physical address from which to remove the mapping.
 * @size:	Size of the mapping.
 *
 * The offset of @addr within a page is ignored and @size is rounded-up to
 * the next page boundary.
 *
 * TLB invalidation is performed for each page-table entry cleared during the
 * unmapping operation and the reference count for the page-table page
 * containing the cleared entry is decremented, with unreferenced pages being
 * freed. Unmapping a cacheable page will ensure that it is clean to the PoC if
 * FWB is not supported by the CPU.
 *
 * Return: 0 on success, negative error code on failure.
 */
int kvm_pgtable_stage2_unmap(struct kvm_pgtable *pgt, u64 addr, u64 size);

/**
 * kvm_pgtable_stage2_wrprotect() - Write-protect guest stage-2 address range
 *                                  without TLB invalidation.
 * @pgt:	Page-table structure initialised by kvm_pgtable_stage2_init*().
 * @addr:	Intermediate physical address from which to write-protect.
 * @size:	Size of the range.
 *
 * The offset of @addr within a page is ignored and @size is rounded-up to
 * the next page boundary.
 *
 * Note that it is the caller's responsibility to invalidate the TLB after
 * calling this function to ensure that the updated permissions are visible
 * to the CPUs.
 *
 * Return: 0 on success, negative error code on failure.
 */
int kvm_pgtable_stage2_wrprotect(struct kvm_pgtable *pgt, u64 addr, u64 size);

/**
 * kvm_pgtable_stage2_mkyoung() - Set the access flag in a page-table entry.
 * @pgt:	Page-table structure initialised by kvm_pgtable_stage2_init*().
 * @addr:	Intermediate physical address to identify the page-table entry.
 *
 * The offset of @addr within a page is ignored.
 *
 * If there is a valid, leaf page-table entry used to translate @addr, then
 * set the access flag in that entry.
 *
 * Return: The old page-table entry prior to setting the flag, 0 on failure.
 */
kvm_pte_t kvm_pgtable_stage2_mkyoung(struct kvm_pgtable *pgt, u64 addr);

/**
 * kvm_pgtable_stage2_mkold() - Clear the access flag in a page-table entry.
 * @pgt:	Page-table structure initialised by kvm_pgtable_stage2_init*().
 * @addr:	Intermediate physical address to identify the page-table entry.
 *
 * The offset of @addr within a page is ignored.
 *
 * If there is a valid, leaf page-table entry used to translate @addr, then
 * clear the access flag in that entry.
 *
 * Note that it is the caller's responsibility to invalidate the TLB after
 * calling this function to ensure that the updated permissions are visible
 * to the CPUs.
 *
 * Return: The old page-table entry prior to clearing the flag, 0 on failure.
 */
kvm_pte_t kvm_pgtable_stage2_mkold(struct kvm_pgtable *pgt, u64 addr);

/**
 * kvm_pgtable_stage2_relax_perms() - Relax the permissions enforced by a
 *				      page-table entry.
 * @pgt:	Page-table structure initialised by kvm_pgtable_stage2_init*().
 * @addr:	Intermediate physical address to identify the page-table entry.
 * @prot:	Additional permissions to grant for the mapping.
 *
 * The offset of @addr within a page is ignored.
 *
 * If there is a valid, leaf page-table entry used to translate @addr, then
 * relax the permissions in that entry according to the read, write and
 * execute permissions specified by @prot. No permissions are removed, and
 * TLB invalidation is performed after updating the entry. Software bits cannot
 * be set or cleared using kvm_pgtable_stage2_relax_perms().
 *
 * Return: 0 on success, negative error code on failure.
 */
int kvm_pgtable_stage2_relax_perms(struct kvm_pgtable *pgt, u64 addr,
				   enum kvm_pgtable_prot prot);

/**
 * kvm_pgtable_stage2_is_young() - Test whether a page-table entry has the
 *				   access flag set.
 * @pgt:	Page-table structure initialised by kvm_pgtable_stage2_init*().
 * @addr:	Intermediate physical address to identify the page-table entry.
 *
 * The offset of @addr within a page is ignored.
 *
 * Return: True if the page-table entry has the access flag set, false otherwise.
 */
bool kvm_pgtable_stage2_is_young(struct kvm_pgtable *pgt, u64 addr);
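
/*
 * Example (editor's sketch): a clear-and-test aging cycle for the page
 * translated by 'ipa'. The names are illustrative only:
 *
 *	kvm_pgtable_stage2_mkold(pgt, ipa);
 *	// ... caller invalidates the TLB for ipa, then some time later ...
 *	bool referenced = kvm_pgtable_stage2_is_young(pgt, ipa);
 *
 * If the guest touched the page between the two calls, the access flag will
 * have been set again and 'referenced' reads back true.
 */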

/**
 * kvm_pgtable_stage2_flush() - Clean and invalidate data cache to Point
 *				      of Coherency for guest stage-2 address
 *				      range.
 * @pgt:	Page-table structure initialised by kvm_pgtable_stage2_init*().
 * @addr:	Intermediate physical address from which to flush.
 * @size:	Size of the range.
 *
 * The offset of @addr within a page is ignored and @size is rounded-up to
 * the next page boundary.
 *
 * Return: 0 on success, negative error code on failure.
 */
int kvm_pgtable_stage2_flush(struct kvm_pgtable *pgt, u64 addr, u64 size);

/**
 * kvm_pgtable_walk() - Walk a page-table.
 * @pgt:	Page-table structure initialised by kvm_pgtable_*_init().
 * @addr:	Input address for the start of the walk.
 * @size:	Size of the range to walk.
 * @walker:	Walker callback description.
 *
 * The offset of @addr within a page is ignored and @size is rounded-up to
 * the next page boundary.
 *
 * The walker will walk the page-table entries corresponding to the input
 * address range specified, visiting entries according to the walker flags.
 * Invalid entries are treated as leaf entries. Leaf entries are reloaded
 * after invoking the walker callback, allowing the walker to descend into
 * a newly installed table.
 *
 * Returning a negative error code from the walker callback function will
 * terminate the walk immediately with the same error code.
 *
 * Return: 0 on success, negative error code on failure.
 */
int kvm_pgtable_walk(struct kvm_pgtable *pgt, u64 addr, u64 size,
		     struct kvm_pgtable_walker *walker);

/**
 * kvm_pgtable_get_leaf() - Walk a page-table and retrieve the leaf entry
 *			    with its level.
 * @pgt:	Page-table structure initialised by kvm_pgtable_*_init()
 *		or a similar initialiser.
 * @addr:	Input address for the start of the walk.
 * @ptep:	Pointer to storage for the retrieved PTE.
 * @level:	Pointer to storage for the level of the retrieved PTE.
 *
 * The offset of @addr within a page is ignored.
 *
 * The walker will walk the page-table entries corresponding to the input
 * address specified, retrieving the leaf corresponding to this address.
 * Invalid entries are treated as leaf entries.
 *
 * Return: 0 on success, negative error code on failure.
 */
int kvm_pgtable_get_leaf(struct kvm_pgtable *pgt, u64 addr,
			 kvm_pte_t *ptep, u32 *level);
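
/*
 * Example (editor's sketch): querying the leaf that translates 'ipa' and
 * reporting the level it is mapped at. The names are illustrative only:
 *
 *	kvm_pte_t pte;
 *	u32 level;
 *
 *	if (!kvm_pgtable_get_leaf(pgt, ipa, &pte, &level) &&
 *	    kvm_pte_valid(pte))
 *		pr_info("ipa mapped at level %u (%llu-byte granule)\n",
 *			level, kvm_granule_size(level));
 */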

/**
 * kvm_pgtable_stage2_pte_prot() - Retrieve the protection attributes of a
 *				   stage-2 Page-Table Entry.
 * @pte:	Page-table entry
 *
 * Return: protection attributes of the page-table entry in the enum
 *	   kvm_pgtable_prot format.
 */
enum kvm_pgtable_prot kvm_pgtable_stage2_pte_prot(kvm_pte_t pte);

/**
 * kvm_pgtable_hyp_pte_prot() - Retrieve the protection attributes of a stage-1
 *				Page-Table Entry.
 * @pte:	Page-table entry
 *
 * Return: protection attributes of the page-table entry in the enum
 *	   kvm_pgtable_prot format.
 */
enum kvm_pgtable_prot kvm_pgtable_hyp_pte_prot(kvm_pte_t pte);
#endif	/* __ARM64_KVM_PGTABLE_H__ */