/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2020 Google LLC
 * Author: Quentin Perret <qperret@google.com>
 */

#ifndef __KVM_NVHE_MEM_PROTECT__
#define __KVM_NVHE_MEM_PROTECT__
#include <linux/kvm_host.h>
#include <asm/kvm_hyp.h>
#include <asm/kvm_mmu.h>
#include <asm/kvm_pgtable.h>
#include <asm/virt.h>
#include <nvhe/pkvm.h>
#include <nvhe/spinlock.h>

/*
 * SW bits 0-1 are reserved to track the memory ownership state of each page:
 *   00: The page is owned exclusively by the page-table owner.
 *   01: The page is owned by the page-table owner, but is shared
 *       with another entity.
 *   10: The page is shared with, but not owned by the page-table owner.
 *   11: Reserved for future use (lending).
 */
enum pkvm_page_state {
	PKVM_PAGE_OWNED			= 0ULL,
	PKVM_PAGE_SHARED_OWNED		= KVM_PGTABLE_PROT_SW0,
	PKVM_PAGE_SHARED_BORROWED	= KVM_PGTABLE_PROT_SW1,
	__PKVM_PAGE_RESERVED		= KVM_PGTABLE_PROT_SW0 |
					  KVM_PGTABLE_PROT_SW1,

	/* Meta-states which aren't encoded directly in the PTE's SW bits */
	PKVM_NOPAGE,
};

#define PKVM_PAGE_STATE_PROT_MASK	(KVM_PGTABLE_PROT_SW0 | KVM_PGTABLE_PROT_SW1)
static inline enum kvm_pgtable_prot pkvm_mkstate(enum kvm_pgtable_prot prot,
						 enum pkvm_page_state state)
{
	return (prot & ~PKVM_PAGE_STATE_PROT_MASK) | state;
}

static inline enum pkvm_page_state pkvm_getstate(enum kvm_pgtable_prot prot)
{
	return prot & PKVM_PAGE_STATE_PROT_MASK;
}
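
/*
 * A minimal illustrative sketch, not part of the pKVM API: it shows how
 * a page state is folded into the SW bits of a prot value with
 * pkvm_mkstate() and recovered with pkvm_getstate(). The helper name
 * and the prot value are arbitrary examples.
 */
static inline bool __pkvm_page_state_roundtrip_example(void)
{
	enum kvm_pgtable_prot prot = KVM_PGTABLE_PROT_R | KVM_PGTABLE_PROT_W;

	/* Encode "owned, but shared with another entity" (SW bits 01). */
	prot = pkvm_mkstate(prot, PKVM_PAGE_SHARED_OWNED);

	/* The R/W bits are untouched and the state round-trips. */
	return pkvm_getstate(prot) == PKVM_PAGE_SHARED_OWNED;
}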

struct host_mmu {
	struct kvm_arch arch;
	struct kvm_pgtable pgt;
	struct kvm_pgtable_mm_ops mm_ops;
	hyp_spinlock_t lock;
};
extern struct host_mmu host_mmu;

/*
 * This corresponds to the page-table locking order: when a transition
 * involves multiple components, their locks must be taken in increasing
 * ID order.
 */
enum pkvm_component_id {
	PKVM_ID_HOST,
	PKVM_ID_HYP,
	PKVM_ID_FFA,
};
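
/*
 * A minimal illustrative sketch, not part of this header's API: locks
 * belonging to different components are taken in increasing
 * pkvm_component_id order, so the host's stage-2 lock (PKVM_ID_HOST)
 * is always taken before the hypervisor's (PKVM_ID_HYP). The
 * hyp_pgd_lock parameter is a hypothetical stand-in for the hyp
 * page-table lock.
 */
static inline void __pkvm_lock_order_example(hyp_spinlock_t *hyp_pgd_lock)
{
	hyp_spin_lock(&host_mmu.lock);	/* PKVM_ID_HOST first... */
	hyp_spin_lock(hyp_pgd_lock);	/* ...then PKVM_ID_HYP */

	/* ... perform the host <-> hyp transition ... */

	hyp_spin_unlock(hyp_pgd_lock);
	hyp_spin_unlock(&host_mmu.lock);
}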

extern unsigned long hyp_nr_cpus;

int __pkvm_prot_finalize(void);
int __pkvm_host_share_hyp(u64 pfn);
int __pkvm_host_unshare_hyp(u64 pfn);
int __pkvm_host_donate_hyp(u64 pfn, u64 nr_pages);
int __pkvm_hyp_donate_host(u64 pfn, u64 nr_pages);
int __pkvm_host_share_ffa(u64 pfn, u64 nr_pages);
int __pkvm_host_unshare_ffa(u64 pfn, u64 nr_pages);

bool addr_is_memory(phys_addr_t phys);
int host_stage2_idmap_locked(phys_addr_t addr, u64 size, enum kvm_pgtable_prot prot);
int host_stage2_set_owner_locked(phys_addr_t addr, u64 size, u8 owner_id);
int kvm_host_prepare_stage2(void *pgt_pool_base);
int kvm_guest_prepare_stage2(struct pkvm_hyp_vm *vm, void *pgd);
void handle_host_mem_abort(struct kvm_cpu_context *host_ctxt);

int hyp_pin_shared_mem(void *from, void *to);
void hyp_unpin_shared_mem(void *from, void *to);
void reclaim_guest_pages(struct pkvm_hyp_vm *vm, struct kvm_hyp_memcache *mc);
int refill_memcache(struct kvm_hyp_memcache *mc, unsigned long min_pages,
		    struct kvm_hyp_memcache *host_mc);

/*
 * Load the host's stage-2 once protected mode has been initialized;
 * until then the host runs with stage-2 translation disabled, so just
 * reset VTTBR_EL2 (leaving the host on VMID 0).
 */
static __always_inline void __load_host_stage2(void)
{
	if (static_branch_likely(&kvm_protected_mode_initialized))
		__load_stage2(&host_mmu.arch.mmu, &host_mmu.arch);
	else
		write_sysreg(0, vttbr_el2);
}
#endif /* __KVM_NVHE_MEM_PROTECT__ */