// SPDX-License-Identifier: GPL-2.0
/*
 * AMD Encrypted Register State Support
 *
 * Author: Joerg Roedel <jroedel@suse.de>
 */

/*
 * misc.h needs to be first because it knows how to include the other kernel
 * headers in the pre-decompression code in a way that does not break
 * compilation.
 */
#include "misc.h"

#include <asm/pgtable_types.h>
#include <asm/sev.h>
#include <asm/trapnr.h>
#include <asm/trap_pf.h>
#include <asm/msr-index.h>
#include <asm/fpu/xcr.h>
#include <asm/ptrace.h>
#include <asm/svm.h>

#include "error.h"
#include "../msr.h"

struct ghcb boot_ghcb_page __aligned(PAGE_SIZE);
struct ghcb *boot_ghcb;

/*
 * Copy a version of this function here - insn-eval.c can't be used in
 * pre-decompression code.
 */
static bool insn_has_rep_prefix(struct insn *insn)
{
	insn_byte_t p;
	int i;

	insn_get_prefixes(insn);

	for_each_insn_prefix(insn, i, p) {
		if (p == 0xf2 || p == 0xf3)
			return true;
	}

	return false;
}

/*
 * Only a dummy for insn_get_seg_base() - Early boot-code is 64bit only and
 * doesn't use segments.
 */
static unsigned long insn_get_seg_base(struct pt_regs *regs, int seg_reg_idx)
{
	return 0UL;
}

/*
 * Accessors for the GHCB MSR, which holds the GHCB GPA and carries GHCB MSR
 * protocol requests and responses.
 */
static inline u64 sev_es_rd_ghcb_msr(void)
{
	struct msr m;

	boot_rdmsr(MSR_AMD64_SEV_ES_GHCB, &m);

	return m.q;
}

static inline void sev_es_wr_ghcb_msr(u64 val)
{
	struct msr m;

	m.q = val;
	boot_wrmsr(MSR_AMD64_SEV_ES_GHCB, &m);
}

/* Fetch and decode the instruction that triggered the #VC exception. */
static enum es_result vc_decode_insn(struct es_em_ctxt *ctxt)
{
	char buffer[MAX_INSN_SIZE];
	int ret;

	memcpy(buffer, (unsigned char *)ctxt->regs->ip, MAX_INSN_SIZE);

	ret = insn_decode(&ctxt->insn, buffer, MAX_INSN_SIZE, INSN_MODE_64);
	if (ret < 0)
		return ES_DECODE_FAILED;

	return ES_OK;
}

/*
 * Memory access helpers for the shared #VC handlers - in the pre-decompression
 * code a plain memcpy() is sufficient.
 */
static enum es_result vc_write_mem(struct es_em_ctxt *ctxt,
				   void *dst, char *buf, size_t size)
{
	memcpy(dst, buf, size);

	return ES_OK;
}

static enum es_result vc_read_mem(struct es_em_ctxt *ctxt,
				  void *src, char *buf, size_t size)
{
	memcpy(buf, src, size);

	return ES_OK;
}

#undef __init
#undef __pa
#define __init
#define __pa(x)	((unsigned long)(x))

#define __BOOT_COMPRESSED

/* Basic instruction decoding support needed */
#include "../../lib/inat.c"
#include "../../lib/insn.c"

/* Include code for early handlers */
#include "../../kernel/sev-shared.c"

static inline bool sev_snp_enabled(void)
{
	return sev_status & MSR_AMD64_SEV_SNP_ENABLED;
}

/*
 * Transition a single 4K page between private and shared using the GHCB MSR
 * protocol and PVALIDATE.
 */
static void __page_state_change(unsigned long paddr, enum psc_op op)
{
	u64 val;

	if (!sev_snp_enabled())
		return;

	/*
	 * If private -> shared then invalidate the page before requesting the
	 * state change in the RMP table.
	 */
	if (op == SNP_PAGE_STATE_SHARED && pvalidate(paddr, RMP_PG_SIZE_4K, 0))
		sev_es_terminate(SEV_TERM_SET_LINUX, GHCB_TERM_PVALIDATE);

	/* Issue VMGEXIT to change the page state in RMP table. */
	sev_es_wr_ghcb_msr(GHCB_MSR_PSC_REQ_GFN(paddr >> PAGE_SHIFT, op));
	VMGEXIT();

	/* Read the response of the VMGEXIT. */
	val = sev_es_rd_ghcb_msr();
	if ((GHCB_RESP_CODE(val) != GHCB_MSR_PSC_RESP) || GHCB_MSR_PSC_RESP_VAL(val))
		sev_es_terminate(SEV_TERM_SET_LINUX, GHCB_TERM_PSC);

	/*
	 * Now that page state is changed in the RMP table, validate it so that it is
	 * consistent with the RMP entry.
	 */
	if (op == SNP_PAGE_STATE_PRIVATE && pvalidate(paddr, RMP_PG_SIZE_4K, 1))
		sev_es_terminate(SEV_TERM_SET_LINUX, GHCB_TERM_PVALIDATE);
}

void snp_set_page_private(unsigned long paddr)
{
	__page_state_change(paddr, SNP_PAGE_STATE_PRIVATE);
}

void snp_set_page_shared(unsigned long paddr)
{
	__page_state_change(paddr, SNP_PAGE_STATE_SHARED);
}

static bool early_setup_ghcb(void)
{
	if (set_page_decrypted((unsigned long)&boot_ghcb_page))
		return false;

	/* Page is now mapped decrypted, clear it */
	memset(&boot_ghcb_page, 0, sizeof(boot_ghcb_page));

	boot_ghcb = &boot_ghcb_page;

	/* Initialize lookup tables for the instruction decoder */
	inat_init_tables();

	return true;
}

void sev_es_shutdown_ghcb(void)
{
	if (!boot_ghcb)
		return;

	if (!sev_es_check_cpu_features())
		error("SEV-ES CPU Features missing.");

	/*
	 * GHCB Page must be flushed from the cache and mapped encrypted again.
	 * Otherwise the running kernel will see strange cache effects when
	 * trying to use that page.
	 */
	if (set_page_encrypted((unsigned long)&boot_ghcb_page))
		error("Can't map GHCB page encrypted");

	/*
	 * GHCB page is mapped encrypted again and flushed from the cache.
	 * Mark it non-present now to catch bugs when #VC exceptions trigger
	 * after this point.
	 */
	if (set_page_non_present((unsigned long)&boot_ghcb_page))
		error("Can't unmap GHCB page");
}

bool sev_es_check_ghcb_fault(unsigned long address)
{
	/* Check whether the fault was on the GHCB page */
	return ((address & PAGE_MASK) == (unsigned long)&boot_ghcb_page);
}

/* #VC exception handler for the pre-decompression boot code. */
void do_boot_stage2_vc(struct pt_regs *regs, unsigned long exit_code)
{
	struct es_em_ctxt ctxt;
	enum es_result result;

	if (!boot_ghcb && !early_setup_ghcb())
		sev_es_terminate(SEV_TERM_SET_GEN, GHCB_SEV_ES_GEN_REQ);

	vc_ghcb_invalidate(boot_ghcb);
	result = vc_init_em_ctxt(&ctxt, regs, exit_code);
	if (result != ES_OK)
		goto finish;

	switch (exit_code) {
	case SVM_EXIT_RDTSC:
	case SVM_EXIT_RDTSCP:
		result = vc_handle_rdtsc(boot_ghcb, &ctxt, exit_code);
		break;
	case SVM_EXIT_IOIO:
		result = vc_handle_ioio(boot_ghcb, &ctxt);
		break;
	case SVM_EXIT_CPUID:
		result = vc_handle_cpuid(boot_ghcb, &ctxt);
		break;
	default:
		result = ES_UNSUPPORTED;
		break;
	}

finish:
	if (result == ES_OK)
		vc_finish_insn(&ctxt);
	else if (result != ES_RETRY)
		sev_es_terminate(SEV_TERM_SET_GEN, GHCB_SEV_ES_GEN_REQ);
}

static void enforce_vmpl0(void)
{
	u64 attrs;
	int err;

	/*
	 * RMPADJUST modifies RMP permissions of a lesser-privileged (numerically
	 * higher) privilege level. Here, clear the VMPL1 permission mask of the
	 * GHCB page. If the guest is not running at VMPL0, this will fail.
	 *
	 * If the guest is running at VMPL0, it will succeed. Even if that operation
	 * modifies permission bits, it is still ok to do so currently because Linux
	 * SNP guests are supported only on VMPL0, so changes to the VMPL1-or-higher
	 * permission masks are a don't-care.
	 */
	attrs = 1;
	if (rmpadjust((unsigned long)&boot_ghcb_page, RMP_PG_SIZE_4K, attrs))
		sev_es_terminate(SEV_TERM_SET_LINUX, GHCB_TERM_NOT_VMPL0);
}

/*
 * Detect SEV/SEV-ES/SEV-SNP support, negotiate the GHCB protocol where needed
 * and set the SME encryption mask used by the decompression code.
 */
void sev_enable(struct boot_params *bp)
{
	unsigned int eax, ebx, ecx, edx;
	struct msr m;

	/* Check for the SME/SEV support leaf */
	eax = 0x80000000;
	ecx = 0;
	native_cpuid(&eax, &ebx, &ecx, &edx);
	if (eax < 0x8000001f)
		return;

	/*
	 * Check for the SME/SEV feature:
	 *   CPUID Fn8000_001F[EAX]
	 *   - Bit 0 - Secure Memory Encryption support
	 *   - Bit 1 - Secure Encrypted Virtualization support
	 *   CPUID Fn8000_001F[EBX]
	 *   - Bits 5:0 - Pagetable bit position used to indicate encryption
	 */
	eax = 0x8000001f;
	ecx = 0;
	native_cpuid(&eax, &ebx, &ecx, &edx);
	/* Check whether SEV is supported */
	if (!(eax & BIT(1)))
		return;

	/* Set the SME mask if this is an SEV guest. */
	boot_rdmsr(MSR_AMD64_SEV, &m);
	sev_status = m.q;
	if (!(sev_status & MSR_AMD64_SEV_ENABLED))
		return;

	/* Negotiate the GHCB protocol version. */
	if (sev_status & MSR_AMD64_SEV_ES_ENABLED) {
		if (!sev_es_negotiate_protocol())
			sev_es_terminate(SEV_TERM_SET_GEN, GHCB_SEV_ES_PROT_UNSUPPORTED);
	}

	/*
	 * SNP is supported in v2 of the GHCB spec which mandates support for HV
	 * features.
	 */
	if (sev_status & MSR_AMD64_SEV_SNP_ENABLED) {
		if (!(get_hv_features() & GHCB_HV_FT_SNP))
			sev_es_terminate(SEV_TERM_SET_GEN, GHCB_SNP_UNSUPPORTED);

		enforce_vmpl0();
	}

	sme_me_mask = BIT_ULL(ebx & 0x3f);
}