// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2020 Google LLC
 * Author: Will Deacon <will@kernel.org>
 */

#ifndef __ARM64_KVM_PGTABLE_H__
#define __ARM64_KVM_PGTABLE_H__

#include <linux/bits.h>
#include <linux/kvm_host.h>
#include <linux/types.h>

#define KVM_PGTABLE_MAX_LEVELS		4U

/*
 * The largest supported block sizes for KVM (no 52-bit PA support):
 *  - 4K (level 1):	1GB
 *  - 16K (level 2):	32MB
 *  - 64K (level 2):	512MB
 */
#ifdef CONFIG_ARM64_4K_PAGES
#define KVM_PGTABLE_MIN_BLOCK_LEVEL	1U
#else
#define KVM_PGTABLE_MIN_BLOCK_LEVEL	2U
#endif

static inline u64 kvm_get_parange(u64 mmfr0)
{
	u64 parange = cpuid_feature_extract_unsigned_field(mmfr0,
				ID_AA64MMFR0_EL1_PARANGE_SHIFT);
	if (parange > ID_AA64MMFR0_EL1_PARANGE_MAX)
		parange = ID_AA64MMFR0_EL1_PARANGE_MAX;

	return parange;
}

typedef u64 kvm_pte_t;

#define KVM_PTE_VALID			BIT(0)

#define KVM_PTE_ADDR_MASK		GENMASK(47, PAGE_SHIFT)
#define KVM_PTE_ADDR_51_48		GENMASK(15, 12)

#define KVM_PHYS_INVALID		(-1ULL)

static inline bool kvm_pte_valid(kvm_pte_t pte)
{
	return pte & KVM_PTE_VALID;
}

static inline u64 kvm_pte_to_phys(kvm_pte_t pte)
{
	u64 pa = pte & KVM_PTE_ADDR_MASK;

	if (PAGE_SHIFT == 16)
		pa |= FIELD_GET(KVM_PTE_ADDR_51_48, pte) << 48;

	return pa;
}

static inline kvm_pte_t kvm_phys_to_pte(u64 pa)
{
	kvm_pte_t pte = pa & KVM_PTE_ADDR_MASK;

	if (PAGE_SHIFT == 16) {
		pa &= GENMASK(51, 48);
		pte |= FIELD_PREP(KVM_PTE_ADDR_51_48, pa >> 48);
	}

	return pte;
}

static inline kvm_pfn_t kvm_pte_to_pfn(kvm_pte_t pte)
{
	return __phys_to_pfn(kvm_pte_to_phys(pte));
}

static inline u64 kvm_granule_shift(u32 level)
{
	/* Assumes KVM_PGTABLE_MAX_LEVELS is 4 */
	return ARM64_HW_PGTABLE_LEVEL_SHIFT(level);
}

static inline u64 kvm_granule_size(u32 level)
{
	return BIT(kvm_granule_shift(level));
}

static inline bool kvm_level_supports_block_mapping(u32 level)
{
	return level >= KVM_PGTABLE_MIN_BLOCK_LEVEL;
}

/**
 * struct kvm_pgtable_mm_ops - Memory management callbacks.
 * @zalloc_page:		Allocate a single zeroed memory page.
 *				The @arg parameter can be used by the walker
 *				to pass a memcache. The initial refcount of
 *				the page is 1.
 * @zalloc_pages_exact:		Allocate an exact number of zeroed memory pages.
 *				The @size parameter is in bytes, and is rounded
 *				up to the next page boundary. The resulting
 *				allocation is physically contiguous.
 * @free_pages_exact:		Free an exact number of memory pages previously
 *				allocated by zalloc_pages_exact.
 * @free_removed_table:		Free a removed paging structure by unlinking and
 *				dropping references.
 * @get_page:			Increment the refcount on a page.
 * @put_page:			Decrement the refcount on a page. When the
 *				refcount reaches 0 the page is automatically
 *				freed.
 * @page_count:			Return the refcount of a page.
 * @phys_to_virt:		Convert a physical address into a virtual
 *				address mapped in the current context.
 * @virt_to_phys:		Convert a virtual address mapped in the current
 *				context into a physical address.
 * @dcache_clean_inval_poc:	Clean and invalidate the data cache to the PoC
 *				for the specified memory address range.
 * @icache_inval_pou:		Invalidate the instruction cache to the PoU
 *				for the specified memory address range.
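 *
 * Example: a minimal sketch of wiring up these callbacks for a
 * kernel-context user. The example_*() helpers are hypothetical and not
 * part of this header:
 *
 *	static struct kvm_pgtable_mm_ops example_mm_ops = {
 *		.zalloc_page	= example_zalloc_page,	// e.g. backed by a memcache
 *		.get_page	= example_get_page,
 *		.put_page	= example_put_page,
 *		.page_count	= example_page_count,
 *		.phys_to_virt	= example_phys_to_virt,	// e.g. __va()
 *		.virt_to_phys	= example_virt_to_phys,	// e.g. __pa()
 *	};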
 */
struct kvm_pgtable_mm_ops {
	void*		(*zalloc_page)(void *arg);
	void*		(*zalloc_pages_exact)(size_t size);
	void		(*free_pages_exact)(void *addr, size_t size);
	void		(*free_removed_table)(void *addr, u32 level);
	void		(*get_page)(void *addr);
	void		(*put_page)(void *addr);
	int		(*page_count)(void *addr);
	void*		(*phys_to_virt)(phys_addr_t phys);
	phys_addr_t	(*virt_to_phys)(void *addr);
	void		(*dcache_clean_inval_poc)(void *addr, size_t size);
	void		(*icache_inval_pou)(void *addr, size_t size);
};

/**
 * enum kvm_pgtable_stage2_flags - Stage-2 page-table flags.
 * @KVM_PGTABLE_S2_NOFWB:	Don't enforce Normal-WB even if the CPUs have
 *				ARM64_HAS_STAGE2_FWB.
 * @KVM_PGTABLE_S2_IDMAP:	Only use identity mappings.
 */
enum kvm_pgtable_stage2_flags {
	KVM_PGTABLE_S2_NOFWB	= BIT(0),
	KVM_PGTABLE_S2_IDMAP	= BIT(1),
};

/**
 * enum kvm_pgtable_prot - Page-table permissions and attributes.
 * @KVM_PGTABLE_PROT_X:		Execute permission.
 * @KVM_PGTABLE_PROT_W:		Write permission.
 * @KVM_PGTABLE_PROT_R:		Read permission.
 * @KVM_PGTABLE_PROT_DEVICE:	Device attributes.
 * @KVM_PGTABLE_PROT_SW0:	Software bit 0.
 * @KVM_PGTABLE_PROT_SW1:	Software bit 1.
 * @KVM_PGTABLE_PROT_SW2:	Software bit 2.
 * @KVM_PGTABLE_PROT_SW3:	Software bit 3.
 */
enum kvm_pgtable_prot {
	KVM_PGTABLE_PROT_X			= BIT(0),
	KVM_PGTABLE_PROT_W			= BIT(1),
	KVM_PGTABLE_PROT_R			= BIT(2),

	KVM_PGTABLE_PROT_DEVICE			= BIT(3),

	KVM_PGTABLE_PROT_SW0			= BIT(55),
	KVM_PGTABLE_PROT_SW1			= BIT(56),
	KVM_PGTABLE_PROT_SW2			= BIT(57),
	KVM_PGTABLE_PROT_SW3			= BIT(58),
};

#define KVM_PGTABLE_PROT_RW	(KVM_PGTABLE_PROT_R | KVM_PGTABLE_PROT_W)
#define KVM_PGTABLE_PROT_RWX	(KVM_PGTABLE_PROT_RW | KVM_PGTABLE_PROT_X)

#define PKVM_HOST_MEM_PROT	KVM_PGTABLE_PROT_RWX
#define PKVM_HOST_MMIO_PROT	KVM_PGTABLE_PROT_RW

#define PAGE_HYP		KVM_PGTABLE_PROT_RW
#define PAGE_HYP_EXEC		(KVM_PGTABLE_PROT_R | KVM_PGTABLE_PROT_X)
#define PAGE_HYP_RO		(KVM_PGTABLE_PROT_R)
#define PAGE_HYP_DEVICE		(PAGE_HYP | KVM_PGTABLE_PROT_DEVICE)

typedef bool (*kvm_pgtable_force_pte_cb_t)(u64 addr, u64 end,
					   enum kvm_pgtable_prot prot);

/**
 * enum kvm_pgtable_walk_flags - Flags to control a depth-first page-table walk.
 * @KVM_PGTABLE_WALK_LEAF:		Visit leaf entries, including invalid
 *					entries.
 * @KVM_PGTABLE_WALK_TABLE_PRE:		Visit table entries before their
 *					children.
 * @KVM_PGTABLE_WALK_TABLE_POST:	Visit table entries after their
 *					children.
 * @KVM_PGTABLE_WALK_SHARED:		Indicates the page-tables may be shared
 *					with other software walkers.
 * @KVM_PGTABLE_WALK_HANDLE_FAULT:	Indicates the page-table walk was
 *					invoked from a fault handler.
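 *
 * Example: a walker that visits leaf entries and may run concurrently
 * with other software walkers would set (a sketch, not a requirement of
 * the API):
 *
 *	walker->flags = KVM_PGTABLE_WALK_LEAF | KVM_PGTABLE_WALK_SHARED;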
 */
enum kvm_pgtable_walk_flags {
	KVM_PGTABLE_WALK_LEAF			= BIT(0),
	KVM_PGTABLE_WALK_TABLE_PRE		= BIT(1),
	KVM_PGTABLE_WALK_TABLE_POST		= BIT(2),
	KVM_PGTABLE_WALK_SHARED			= BIT(3),
	KVM_PGTABLE_WALK_HANDLE_FAULT		= BIT(4),
};

struct kvm_pgtable_visit_ctx {
	kvm_pte_t				*ptep;
	kvm_pte_t				old;
	void					*arg;
	struct kvm_pgtable_mm_ops		*mm_ops;
	u64					start;
	u64					addr;
	u64					end;
	u32					level;
	enum kvm_pgtable_walk_flags		flags;
};

typedef int (*kvm_pgtable_visitor_fn_t)(const struct kvm_pgtable_visit_ctx *ctx,
					enum kvm_pgtable_walk_flags visit);

static inline bool kvm_pgtable_walk_shared(const struct kvm_pgtable_visit_ctx *ctx)
{
	return ctx->flags & KVM_PGTABLE_WALK_SHARED;
}

/**
 * struct kvm_pgtable_walker - Hook into a page-table walk.
 * @cb:		Callback function to invoke during the walk.
 * @arg:	Argument passed to the callback function.
 * @flags:	Bitwise-OR of flags to identify the entry types on which to
 *		invoke the callback function.
 */
struct kvm_pgtable_walker {
	const kvm_pgtable_visitor_fn_t		cb;
	void * const				arg;
	const enum kvm_pgtable_walk_flags	flags;
};

/*
 * RCU cannot be used in a non-kernel context such as the hyp. As such, page
 * table walkers used in hyp do not call into RCU and instead use other
 * synchronization mechanisms (such as a spinlock).
 */
#if defined(__KVM_NVHE_HYPERVISOR__) || defined(__KVM_VHE_HYPERVISOR__)

typedef kvm_pte_t *kvm_pteref_t;

static inline kvm_pte_t *kvm_dereference_pteref(struct kvm_pgtable_walker *walker,
						kvm_pteref_t pteref)
{
	return pteref;
}

static inline int kvm_pgtable_walk_begin(struct kvm_pgtable_walker *walker)
{
	/*
	 * Due to the lack of RCU (or a similar protection scheme), only
	 * non-shared table walkers are allowed in the hypervisor.
	 */
	if (walker->flags & KVM_PGTABLE_WALK_SHARED)
		return -EPERM;

	return 0;
}

static inline void kvm_pgtable_walk_end(struct kvm_pgtable_walker *walker) {}

static inline bool kvm_pgtable_walk_lock_held(void)
{
	return true;
}

#else

typedef kvm_pte_t __rcu *kvm_pteref_t;

static inline kvm_pte_t *kvm_dereference_pteref(struct kvm_pgtable_walker *walker,
						kvm_pteref_t pteref)
{
	return rcu_dereference_check(pteref, !(walker->flags & KVM_PGTABLE_WALK_SHARED));
}

static inline int kvm_pgtable_walk_begin(struct kvm_pgtable_walker *walker)
{
	if (walker->flags & KVM_PGTABLE_WALK_SHARED)
		rcu_read_lock();

	return 0;
}

static inline void kvm_pgtable_walk_end(struct kvm_pgtable_walker *walker)
{
	if (walker->flags & KVM_PGTABLE_WALK_SHARED)
		rcu_read_unlock();
}

static inline bool kvm_pgtable_walk_lock_held(void)
{
	return rcu_read_lock_held();
}

#endif

/**
 * struct kvm_pgtable - KVM page-table.
 * @ia_bits:		Maximum input address size, in bits.
 * @start_level:	Level at which the page-table walk starts.
 * @pgd:		Pointer to the first top-level entry of the page-table.
 * @mm_ops:		Memory management callbacks.
 * @mmu:		Stage-2 KVM MMU struct. Unused for stage-1 page-tables.
 * @flags:		Stage-2 page-table flags.
 * @force_pte_cb:	Function that returns true if page level mappings must
 *			be used instead of block mappings.
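 *
 * Example: a sketch of a @force_pte_cb that forces page-granularity
 * mappings for a hypothetical region (example_start/example_end are
 * illustrative and not part of this header):
 *
 *	static bool example_force_pte(u64 addr, u64 end,
 *				      enum kvm_pgtable_prot prot)
 *	{
 *		return addr >= example_start && end <= example_end;
 *	}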
 */
struct kvm_pgtable {
	u32					ia_bits;
	u32					start_level;
	kvm_pteref_t				pgd;
	struct kvm_pgtable_mm_ops		*mm_ops;

	/* Stage-2 only */
	struct kvm_s2_mmu			*mmu;
	enum kvm_pgtable_stage2_flags		flags;
	kvm_pgtable_force_pte_cb_t		force_pte_cb;
};

/**
 * kvm_pgtable_hyp_init() - Initialise a hypervisor stage-1 page-table.
 * @pgt:	Uninitialised page-table structure to initialise.
 * @va_bits:	Maximum virtual address bits.
 * @mm_ops:	Memory management callbacks.
 *
 * Return: 0 on success, negative error code on failure.
 */
int kvm_pgtable_hyp_init(struct kvm_pgtable *pgt, u32 va_bits,
			 struct kvm_pgtable_mm_ops *mm_ops);

/**
 * kvm_pgtable_hyp_destroy() - Destroy an unused hypervisor stage-1 page-table.
 * @pgt:	Page-table structure initialised by kvm_pgtable_hyp_init().
 *
 * The page-table is assumed to be unreachable by any hardware walkers prior
 * to freeing and therefore no TLB invalidation is performed.
 */
void kvm_pgtable_hyp_destroy(struct kvm_pgtable *pgt);

/**
 * kvm_pgtable_hyp_map() - Install a mapping in a hypervisor stage-1 page-table.
 * @pgt:	Page-table structure initialised by kvm_pgtable_hyp_init().
 * @addr:	Virtual address at which to place the mapping.
 * @size:	Size of the mapping.
 * @phys:	Physical address of the memory to map.
 * @prot:	Permissions and attributes for the mapping.
 *
 * The offset of @addr within a page is ignored, @size is rounded-up to
 * the next page boundary and @phys is rounded-down to the previous page
 * boundary.
 *
 * If device attributes are not explicitly requested in @prot, then the
 * mapping will be normal, cacheable. Attempts to install a new mapping
 * for a virtual address that is already mapped will be rejected with an
 * error and a WARN().
 *
 * Return: 0 on success, negative error code on failure.
 */
int kvm_pgtable_hyp_map(struct kvm_pgtable *pgt, u64 addr, u64 size, u64 phys,
			enum kvm_pgtable_prot prot);

/**
 * kvm_pgtable_hyp_unmap() - Remove a mapping from a hypervisor stage-1 page-table.
 * @pgt:	Page-table structure initialised by kvm_pgtable_hyp_init().
 * @addr:	Virtual address from which to remove the mapping.
 * @size:	Size of the mapping.
 *
 * The offset of @addr within a page is ignored and @size is rounded-up to
 * the next page boundary.
 *
 * TLB invalidation is performed for each page-table entry cleared during the
 * unmapping operation and the reference count for the page-table page
 * containing the cleared entry is decremented, with unreferenced pages being
 * freed. The unmapping operation will stop early if it encounters either an
 * invalid page-table entry or a valid block mapping which maps beyond the range
 * being unmapped.
 *
 * Return: Number of bytes unmapped, which may be 0.
 */
u64 kvm_pgtable_hyp_unmap(struct kvm_pgtable *pgt, u64 addr, u64 size);

/**
 * kvm_get_vtcr() - Helper to construct VTCR_EL2.
 * @mmfr0:	Sanitized value of SYS_ID_AA64MMFR0_EL1 register.
 * @mmfr1:	Sanitized value of SYS_ID_AA64MMFR1_EL1 register.
 * @phys_shift:	Value to set in VTCR_EL2.T0SZ.
 *
 * The VTCR value is common across all the physical CPUs on the system.
 * We use system wide sanitised values to fill in different fields,
 * except for Hardware Management of Access Flags. HA Flag is set
 * unconditionally on all CPUs, as it is safe to run with or without
 * the feature and the bit is RES0 on CPUs that don't support it.
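 *
 * Example: constructing VTCR_EL2 from the system-wide sanitised ID
 * registers (a sketch of typical kernel-side usage):
 *
 *	u64 mmfr0 = read_sanitised_ftr_reg(SYS_ID_AA64MMFR0_EL1);
 *	u64 mmfr1 = read_sanitised_ftr_reg(SYS_ID_AA64MMFR1_EL1);
 *	u64 vtcr = kvm_get_vtcr(mmfr0, mmfr1, phys_shift);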
 *
 * Return: VTCR_EL2 value
 */
u64 kvm_get_vtcr(u64 mmfr0, u64 mmfr1, u32 phys_shift);

/**
 * kvm_pgtable_stage2_pgd_size() - Helper to compute size of a stage-2 PGD
 * @vtcr:	Content of the VTCR register.
 *
 * Return: the size (in bytes) of the stage-2 PGD
 */
size_t kvm_pgtable_stage2_pgd_size(u64 vtcr);

/**
 * __kvm_pgtable_stage2_init() - Initialise a guest stage-2 page-table.
 * @pgt:	Uninitialised page-table structure to initialise.
 * @mmu:	S2 MMU context for this S2 translation.
 * @mm_ops:	Memory management callbacks.
 * @flags:	Stage-2 configuration flags.
 * @force_pte_cb: Function that returns true if page level mappings must
 *		be used instead of block mappings.
 *
 * Return: 0 on success, negative error code on failure.
 */
int __kvm_pgtable_stage2_init(struct kvm_pgtable *pgt, struct kvm_s2_mmu *mmu,
			      struct kvm_pgtable_mm_ops *mm_ops,
			      enum kvm_pgtable_stage2_flags flags,
			      kvm_pgtable_force_pte_cb_t force_pte_cb);

#define kvm_pgtable_stage2_init(pgt, mmu, mm_ops) \
	__kvm_pgtable_stage2_init(pgt, mmu, mm_ops, 0, NULL)

/**
 * kvm_pgtable_stage2_destroy() - Destroy an unused guest stage-2 page-table.
 * @pgt:	Page-table structure initialised by kvm_pgtable_stage2_init*().
 *
 * The page-table is assumed to be unreachable by any hardware walkers prior
 * to freeing and therefore no TLB invalidation is performed.
 */
void kvm_pgtable_stage2_destroy(struct kvm_pgtable *pgt);

/**
 * kvm_pgtable_stage2_free_removed() - Free a removed stage-2 paging structure.
 * @mm_ops:	Memory management callbacks.
 * @pgtable:	Unlinked stage-2 paging structure to be freed.
 * @level:	Level of the stage-2 paging structure to be freed.
 *
 * The page-table is assumed to be unreachable by any hardware walkers prior to
 * freeing and therefore no TLB invalidation is performed.
 */
void kvm_pgtable_stage2_free_removed(struct kvm_pgtable_mm_ops *mm_ops, void *pgtable, u32 level);

/**
 * kvm_pgtable_stage2_map() - Install a mapping in a guest stage-2 page-table.
 * @pgt:	Page-table structure initialised by kvm_pgtable_stage2_init*().
 * @addr:	Intermediate physical address at which to place the mapping.
 * @size:	Size of the mapping.
 * @phys:	Physical address of the memory to map.
 * @prot:	Permissions and attributes for the mapping.
 * @mc:		Cache of pre-allocated and zeroed memory from which to allocate
 *		page-table pages.
 * @flags:	Flags to control the page-table walk (e.g. a shared walk)
 *
 * The offset of @addr within a page is ignored, @size is rounded-up to
 * the next page boundary and @phys is rounded-down to the previous page
 * boundary.
 *
 * If device attributes are not explicitly requested in @prot, then the
 * mapping will be normal, cacheable.
 *
 * Note that the update of a valid leaf PTE in this function will be aborted
 * if it would recreate the exact same mapping or only change the access
 * permissions. Instead, the vCPU will exit from the guest once more, if still
 * necessary, and then go through the permission-relaxation path.
 *
 * Note that this function will both coalesce existing table entries and split
 * existing block mappings, relying on page-faults to fault back areas outside
 * of the new mapping lazily.
 *
 * Return: 0 on success, negative error code on failure.
 */
int kvm_pgtable_stage2_map(struct kvm_pgtable *pgt, u64 addr, u64 size,
			   u64 phys, enum kvm_pgtable_prot prot,
			   void *mc, enum kvm_pgtable_walk_flags flags);
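
/*
 * Example: installing a 2MiB, read/write/execute mapping of normal memory
 * (a sketch; assumes "pgt" was initialised with kvm_pgtable_stage2_init*()
 * and "mc" was topped up beforehand):
 *
 *	ret = kvm_pgtable_stage2_map(pgt, ipa, SZ_2M, pa,
 *				     KVM_PGTABLE_PROT_RWX, mc, 0);
 */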

/**
 * kvm_pgtable_stage2_set_owner() - Unmap and annotate pages in the IPA space to
 *				    track ownership.
 * @pgt:	Page-table structure initialised by kvm_pgtable_stage2_init*().
 * @addr:	Base intermediate physical address to annotate.
 * @size:	Size of the annotated range.
 * @mc:		Cache of pre-allocated and zeroed memory from which to allocate
 *		page-table pages.
 * @owner_id:	Unique identifier for the owner of the page.
 *
 * By default, all page-tables are owned by identifier 0. This function can be
 * used to mark portions of the IPA space as owned by other entities. When a
 * stage 2 is used with identity-mappings, these annotations allow the
 * page-table data structure to be used as a simple rmap.
 *
 * Return: 0 on success, negative error code on failure.
 */
int kvm_pgtable_stage2_set_owner(struct kvm_pgtable *pgt, u64 addr, u64 size,
				 void *mc, u8 owner_id);

/**
 * kvm_pgtable_stage2_unmap() - Remove a mapping from a guest stage-2 page-table.
 * @pgt:	Page-table structure initialised by kvm_pgtable_stage2_init*().
 * @addr:	Intermediate physical address from which to remove the mapping.
 * @size:	Size of the mapping.
 *
 * The offset of @addr within a page is ignored and @size is rounded-up to
 * the next page boundary.
 *
 * TLB invalidation is performed for each page-table entry cleared during the
 * unmapping operation and the reference count for the page-table page
 * containing the cleared entry is decremented, with unreferenced pages being
 * freed. Unmapping a cacheable page will ensure that it is clean to the PoC if
 * FWB is not supported by the CPU.
 *
 * Return: 0 on success, negative error code on failure.
 */
int kvm_pgtable_stage2_unmap(struct kvm_pgtable *pgt, u64 addr, u64 size);

/**
 * kvm_pgtable_stage2_wrprotect() - Write-protect guest stage-2 address range
 *				    without TLB invalidation.
 * @pgt:	Page-table structure initialised by kvm_pgtable_stage2_init*().
 * @addr:	Intermediate physical address from which to write-protect.
 * @size:	Size of the range.
 *
 * The offset of @addr within a page is ignored and @size is rounded-up to
 * the next page boundary.
 *
 * Note that it is the caller's responsibility to invalidate the TLB after
 * calling this function to ensure that the updated permissions are visible
 * to the CPUs.
 *
 * Return: 0 on success, negative error code on failure.
 */
int kvm_pgtable_stage2_wrprotect(struct kvm_pgtable *pgt, u64 addr, u64 size);
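
/*
 * Example: write-protecting a range for dirty logging and then publishing
 * the change (a sketch; the TLB invalidation shown is illustrative and
 * must match the caller's environment):
 *
 *	ret = kvm_pgtable_stage2_wrprotect(pgt, addr, size);
 *	if (!ret)
 *		kvm_flush_remote_tlbs(kvm);	// make permissions visible
 */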

/**
 * kvm_pgtable_stage2_mkyoung() - Set the access flag in a page-table entry.
 * @pgt:	Page-table structure initialised by kvm_pgtable_stage2_init*().
 * @addr:	Intermediate physical address to identify the page-table entry.
 *
 * The offset of @addr within a page is ignored.
 *
 * If there is a valid, leaf page-table entry used to translate @addr, then
 * set the access flag in that entry.
 *
 * Return: The old page-table entry prior to setting the flag, 0 on failure.
 */
kvm_pte_t kvm_pgtable_stage2_mkyoung(struct kvm_pgtable *pgt, u64 addr);

/**
 * kvm_pgtable_stage2_mkold() - Clear the access flag in a page-table entry.
 * @pgt:	Page-table structure initialised by kvm_pgtable_stage2_init*().
 * @addr:	Intermediate physical address to identify the page-table entry.
 *
 * The offset of @addr within a page is ignored.
 *
 * If there is a valid, leaf page-table entry used to translate @addr, then
 * clear the access flag in that entry.
 *
 * Note that it is the caller's responsibility to invalidate the TLB after
 * calling this function to ensure that the updated permissions are visible
 * to the CPUs.
 *
 * Return: The old page-table entry prior to clearing the flag, 0 on failure.
 */
kvm_pte_t kvm_pgtable_stage2_mkold(struct kvm_pgtable *pgt, u64 addr);

/**
 * kvm_pgtable_stage2_relax_perms() - Relax the permissions enforced by a
 *				      page-table entry.
 * @pgt:	Page-table structure initialised by kvm_pgtable_stage2_init*().
 * @addr:	Intermediate physical address to identify the page-table entry.
 * @prot:	Additional permissions to grant for the mapping.
 *
 * The offset of @addr within a page is ignored.
 *
 * If there is a valid, leaf page-table entry used to translate @addr, then
 * relax the permissions in that entry according to the read, write and
 * execute permissions specified by @prot. No permissions are removed, and
 * TLB invalidation is performed after updating the entry. Software bits cannot
 * be set or cleared using kvm_pgtable_stage2_relax_perms().
 *
 * Return: 0 on success, negative error code on failure.
 */
int kvm_pgtable_stage2_relax_perms(struct kvm_pgtable *pgt, u64 addr,
				   enum kvm_pgtable_prot prot);

/**
 * kvm_pgtable_stage2_is_young() - Test whether a page-table entry has the
 *				   access flag set.
 * @pgt:	Page-table structure initialised by kvm_pgtable_stage2_init*().
 * @addr:	Intermediate physical address to identify the page-table entry.
 *
 * The offset of @addr within a page is ignored.
 *
 * Return: True if the page-table entry has the access flag set, false otherwise.
 */
bool kvm_pgtable_stage2_is_young(struct kvm_pgtable *pgt, u64 addr);

/**
 * kvm_pgtable_stage2_flush() - Clean and invalidate data cache to Point of
 *				Coherency for guest stage-2 address range.
 * @pgt:	Page-table structure initialised by kvm_pgtable_stage2_init*().
 * @addr:	Intermediate physical address from which to flush.
 * @size:	Size of the range.
 *
 * The offset of @addr within a page is ignored and @size is rounded-up to
 * the next page boundary.
 *
 * Return: 0 on success, negative error code on failure.
 */
int kvm_pgtable_stage2_flush(struct kvm_pgtable *pgt, u64 addr, u64 size);

/**
 * kvm_pgtable_walk() - Walk a page-table.
 * @pgt:	Page-table structure initialised by kvm_pgtable_*_init().
 * @addr:	Input address for the start of the walk.
 * @size:	Size of the range to walk.
 * @walker:	Walker callback description.
 *
 * The offset of @addr within a page is ignored and @size is rounded-up to
 * the next page boundary.
 *
 * The walker will walk the page-table entries corresponding to the input
 * address range specified, visiting entries according to the walker flags.
 * Invalid entries are treated as leaf entries. The visited page table entry is
 * reloaded after invoking the walker callback, allowing the walker to descend
 * into a newly installed table.
 *
 * Returning a negative error code from the walker callback function will
 * terminate the walk immediately with the same error code.
 *
 * Return: 0 on success, negative error code on failure.
 */
int kvm_pgtable_walk(struct kvm_pgtable *pgt, u64 addr, u64 size,
		     struct kvm_pgtable_walker *walker);
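
/*
 * Example: counting the valid leaf entries in a range (a sketch; the
 * visitor and the counter argument are hypothetical):
 *
 *	static int example_count_valid(const struct kvm_pgtable_visit_ctx *ctx,
 *				       enum kvm_pgtable_walk_flags visit)
 *	{
 *		if (kvm_pte_valid(ctx->old))
 *			(*(u64 *)ctx->arg)++;
 *		return 0;
 *	}
 *
 *	u64 nr_valid = 0;
 *	struct kvm_pgtable_walker walker = {
 *		.cb	= example_count_valid,
 *		.arg	= &nr_valid,
 *		.flags	= KVM_PGTABLE_WALK_LEAF,
 *	};
 *
 *	ret = kvm_pgtable_walk(pgt, addr, size, &walker);
 */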

/**
 * kvm_pgtable_get_leaf() - Walk a page-table and retrieve the leaf entry
 *			    with its level.
 * @pgt:	Page-table structure initialised by kvm_pgtable_*_init()
 *		or a similar initialiser.
 * @addr:	Input address for the start of the walk.
 * @ptep:	Pointer to storage for the retrieved PTE.
 * @level:	Pointer to storage for the level of the retrieved PTE.
 *
 * The offset of @addr within a page is ignored.
 *
 * The walker will walk the page-table entries corresponding to the input
 * address specified, retrieving the leaf corresponding to this address.
 * Invalid entries are treated as leaf entries.
 *
 * Return: 0 on success, negative error code on failure.
 */
int kvm_pgtable_get_leaf(struct kvm_pgtable *pgt, u64 addr,
			 kvm_pte_t *ptep, u32 *level);

/**
 * kvm_pgtable_stage2_pte_prot() - Retrieve the protection attributes of a
 *				   stage-2 Page-Table Entry.
 * @pte:	Page-table entry
 *
 * Return: protection attributes of the page-table entry in the enum
 *	   kvm_pgtable_prot format.
 */
enum kvm_pgtable_prot kvm_pgtable_stage2_pte_prot(kvm_pte_t pte);

/**
 * kvm_pgtable_hyp_pte_prot() - Retrieve the protection attributes of a stage-1
 *				Page-Table Entry.
 * @pte:	Page-table entry
 *
 * Return: protection attributes of the page-table entry in the enum
 *	   kvm_pgtable_prot format.
 */
enum kvm_pgtable_prot kvm_pgtable_hyp_pte_prot(kvm_pte_t pte);
#endif	/* __ARM64_KVM_PGTABLE_H__ */