// SPDX-License-Identifier: GPL-2.0
/*
 * AMD Encrypted Register State Support
 *
 * Author: Joerg Roedel <jroedel@suse.de>
 */

/*
 * misc.h needs to be first because it knows how to include the other kernel
 * headers in the pre-decompression code in a way that does not break
 * compilation.
 */
#include "misc.h"

#include <asm/pgtable_types.h>
#include <asm/sev.h>
#include <asm/trapnr.h>
#include <asm/trap_pf.h>
#include <asm/msr-index.h>
#include <asm/fpu/xcr.h>
#include <asm/ptrace.h>
#include <asm/svm.h>
#include <asm/cpuid.h>

#include "error.h"
#include "../msr.h"

struct ghcb boot_ghcb_page __aligned(PAGE_SIZE);
struct ghcb *boot_ghcb;

/*
 * Copy a version of this function here - insn-eval.c can't be used in
 * pre-decompression code.
 */
static bool insn_has_rep_prefix(struct insn *insn)
{
	insn_byte_t p;
	int i;

	insn_get_prefixes(insn);

	for_each_insn_prefix(insn, i, p) {
		if (p == 0xf2 || p == 0xf3)
			return true;
	}

	return false;
}

/*
 * Only a dummy for insn_get_seg_base() - Early boot-code is 64bit only and
 * doesn't use segments.
 */
static unsigned long insn_get_seg_base(struct pt_regs *regs, int seg_reg_idx)
{
	return 0UL;
}
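
/*
 * The GHCB MSR (MSR_AMD64_SEV_ES_GHCB) is the communication channel with the
 * hypervisor: it either carries MSR-protocol requests/responses directly, or
 * holds the guest physical address of the shared GHCB page when a full GHCB
 * exchange is performed. The boot_rdmsr()/boot_wrmsr() helpers (see the
 * "../msr.h" include above) are used because the decompressor cannot rely on
 * the regular kernel MSR accessors.
 */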

static inline u64 sev_es_rd_ghcb_msr(void)
{
	struct msr m;

	boot_rdmsr(MSR_AMD64_SEV_ES_GHCB, &m);

	return m.q;
}

static inline void sev_es_wr_ghcb_msr(u64 val)
{
	struct msr m;

	m.q = val;
	boot_wrmsr(MSR_AMD64_SEV_ES_GHCB, &m);
}

static enum es_result vc_decode_insn(struct es_em_ctxt *ctxt)
{
	char buffer[MAX_INSN_SIZE];
	int ret;

	memcpy(buffer, (unsigned char *)ctxt->regs->ip, MAX_INSN_SIZE);

	ret = insn_decode(&ctxt->insn, buffer, MAX_INSN_SIZE, INSN_MODE_64);
	if (ret < 0)
		return ES_DECODE_FAILED;

	return ES_OK;
}

static enum es_result vc_write_mem(struct es_em_ctxt *ctxt,
				   void *dst, char *buf, size_t size)
{
	memcpy(dst, buf, size);

	return ES_OK;
}

static enum es_result vc_read_mem(struct es_em_ctxt *ctxt,
				  void *src, char *buf, size_t size)
{
	memcpy(buf, src, size);

	return ES_OK;
}

#undef __init
#undef __pa
#define __init
#define __pa(x)	((unsigned long)(x))

#define __BOOT_COMPRESSED

/* Basic instruction decoding support needed */
#include "../../lib/inat.c"
#include "../../lib/insn.c"

/* Include code for early handlers */
#include "../../kernel/sev-shared.c"

static inline bool sev_snp_enabled(void)
{
	return sev_status & MSR_AMD64_SEV_SNP_ENABLED;
}
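
/*
 * Page state changes are requested via the GHCB MSR protocol here, since a
 * usable GHCB page is not guaranteed this early: GHCB_MSR_PSC_REQ_GFN() packs
 * the guest frame number and the requested operation into the MSR value, and
 * GHCB_MSR_PSC_RESP_VAL() extracts the error code from the hypervisor's
 * response, which must be zero for success.
 */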

static void __page_state_change(unsigned long paddr, enum psc_op op)
{
	u64 val;

	if (!sev_snp_enabled())
		return;

	/*
	 * If private -> shared then invalidate the page before requesting the
	 * state change in the RMP table.
	 */
	if (op == SNP_PAGE_STATE_SHARED && pvalidate(paddr, RMP_PG_SIZE_4K, 0))
		sev_es_terminate(SEV_TERM_SET_LINUX, GHCB_TERM_PVALIDATE);

	/* Issue VMGEXIT to change the page state in the RMP table. */
	sev_es_wr_ghcb_msr(GHCB_MSR_PSC_REQ_GFN(paddr >> PAGE_SHIFT, op));
	VMGEXIT();

	/* Read the response of the VMGEXIT. */
	val = sev_es_rd_ghcb_msr();
	if ((GHCB_RESP_CODE(val) != GHCB_MSR_PSC_RESP) || GHCB_MSR_PSC_RESP_VAL(val))
		sev_es_terminate(SEV_TERM_SET_LINUX, GHCB_TERM_PSC);

	/*
	 * Now that the page state has been changed in the RMP table, validate
	 * it so that it is consistent with the RMP entry.
	 */
	if (op == SNP_PAGE_STATE_PRIVATE && pvalidate(paddr, RMP_PG_SIZE_4K, 1))
		sev_es_terminate(SEV_TERM_SET_LINUX, GHCB_TERM_PVALIDATE);
}

void snp_set_page_private(unsigned long paddr)
{
	__page_state_change(paddr, SNP_PAGE_STATE_PRIVATE);
}

void snp_set_page_shared(unsigned long paddr)
{
	__page_state_change(paddr, SNP_PAGE_STATE_SHARED);
}

static bool early_setup_ghcb(void)
{
	if (set_page_decrypted((unsigned long)&boot_ghcb_page))
		return false;

	/* Page is now mapped decrypted, clear it */
	memset(&boot_ghcb_page, 0, sizeof(boot_ghcb_page));

	boot_ghcb = &boot_ghcb_page;

	/* Initialize lookup tables for the instruction decoder */
	inat_init_tables();

	/* SNP guests require that the GHCB GPA be registered */
	if (sev_snp_enabled())
		snp_register_ghcb_early(__pa(&boot_ghcb_page));

	return true;
}

void sev_es_shutdown_ghcb(void)
{
	if (!boot_ghcb)
		return;

	if (!sev_es_check_cpu_features())
		error("SEV-ES CPU Features missing.");

	/*
	 * The GHCB page must be flushed from the cache and mapped encrypted
	 * again. Otherwise the running kernel will see strange cache effects
	 * when trying to use that page.
	 */
	if (set_page_encrypted((unsigned long)&boot_ghcb_page))
		error("Can't map GHCB page encrypted");

	/*
	 * GHCB page is mapped encrypted again and flushed from the cache.
	 * Mark it non-present now to catch bugs when #VC exceptions trigger
	 * after this point.
	 */
	if (set_page_non_present((unsigned long)&boot_ghcb_page))
		error("Can't unmap GHCB page");
}
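
/*
 * Request guest termination through the GHCB: the reason set/code are encoded
 * into sw_exit_info_1 via SVM_VMGEXIT_TERM_REASON() and additional diagnostic
 * data is passed in sw_exit_info_2. The GHCB GPA is written to the GHCB MSR
 * before the VMGEXIT so the hypervisor can find the request, and the guest
 * then halts forever in case the hypervisor does not terminate it.
 */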

static void __noreturn sev_es_ghcb_terminate(struct ghcb *ghcb, unsigned int set,
					     unsigned int reason, u64 exit_info_2)
{
	u64 exit_info_1 = SVM_VMGEXIT_TERM_REASON(set, reason);

	vc_ghcb_invalidate(ghcb);
	ghcb_set_sw_exit_code(ghcb, SVM_VMGEXIT_TERM_REQUEST);
	ghcb_set_sw_exit_info_1(ghcb, exit_info_1);
	ghcb_set_sw_exit_info_2(ghcb, exit_info_2);

	sev_es_wr_ghcb_msr(__pa(ghcb));
	VMGEXIT();

	while (true)
		asm volatile("hlt\n" : : : "memory");
}

bool sev_es_check_ghcb_fault(unsigned long address)
{
	/* Check whether the fault was on the GHCB page */
	return ((address & PAGE_MASK) == (unsigned long)&boot_ghcb_page);
}
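
/*
 * #VC exception handler for the decompression stage. Only the exit codes the
 * decompression code is expected to trigger (RDTSC/RDTSCP, port I/O via IOIO,
 * CPUID) are handled; any other exit code, or a failed emulation that is not
 * retryable (ES_RETRY), terminates the guest.
 */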

void do_boot_stage2_vc(struct pt_regs *regs, unsigned long exit_code)
{
	struct es_em_ctxt ctxt;
	enum es_result result;

	if (!boot_ghcb && !early_setup_ghcb())
		sev_es_terminate(SEV_TERM_SET_GEN, GHCB_SEV_ES_GEN_REQ);

	vc_ghcb_invalidate(boot_ghcb);
	result = vc_init_em_ctxt(&ctxt, regs, exit_code);
	if (result != ES_OK)
		goto finish;

	switch (exit_code) {
	case SVM_EXIT_RDTSC:
	case SVM_EXIT_RDTSCP:
		result = vc_handle_rdtsc(boot_ghcb, &ctxt, exit_code);
		break;
	case SVM_EXIT_IOIO:
		result = vc_handle_ioio(boot_ghcb, &ctxt);
		break;
	case SVM_EXIT_CPUID:
		result = vc_handle_cpuid(boot_ghcb, &ctxt);
		break;
	default:
		result = ES_UNSUPPORTED;
		break;
	}

finish:
	if (result == ES_OK)
		vc_finish_insn(&ctxt);
	else if (result != ES_RETRY)
		sev_es_terminate(SEV_TERM_SET_GEN, GHCB_SEV_ES_GEN_REQ);
}

static void enforce_vmpl0(void)
{
	u64 attrs;
	int err;

	/*
	 * RMPADJUST modifies RMP permissions of a lesser-privileged (numerically
	 * higher) privilege level. Here, clear the VMPL1 permission mask of the
	 * GHCB page. If the guest is not running at VMPL0, this will fail.
	 *
	 * If the guest is running at VMPL0, it will succeed. Even if that operation
	 * modifies permission bits, it is still ok to do so currently because Linux
	 * SNP guests are supported only on VMPL0, so changing VMPL1 or higher
	 * permission masks is a don't-care.
	 */
	attrs = 1;
	if (rmpadjust((unsigned long)&boot_ghcb_page, RMP_PG_SIZE_4K, attrs))
		sev_es_terminate(SEV_TERM_SET_LINUX, GHCB_TERM_NOT_VMPL0);
}

/*
 * SNP_FEATURES_IMPL_REQ is the mask of SNP features that will need a
 * guest-side implementation for proper functioning of the guest. If any
 * of these features are enabled in the hypervisor but are lacking a guest-side
 * implementation, the behavior of the guest will be undefined. The guest could
 * fail in a non-obvious way, making it difficult to debug.
 *
 * As the behavior of reserved feature bits is unknown, add them to the
 * required features mask to be on the safe side.
 */
#define SNP_FEATURES_IMPL_REQ	(MSR_AMD64_SNP_VTOM |			\
				 MSR_AMD64_SNP_REFLECT_VC |		\
				 MSR_AMD64_SNP_RESTRICTED_INJ |		\
				 MSR_AMD64_SNP_ALT_INJ |		\
				 MSR_AMD64_SNP_DEBUG_SWAP |		\
				 MSR_AMD64_SNP_VMPL_SSS |		\
				 MSR_AMD64_SNP_SECURE_TSC |		\
				 MSR_AMD64_SNP_VMGEXIT_PARAM |		\
				 MSR_AMD64_SNP_VMSA_REG_PROTECTION |	\
				 MSR_AMD64_SNP_RESERVED_BIT13 |		\
				 MSR_AMD64_SNP_RESERVED_BIT15 |		\
				 MSR_AMD64_SNP_RESERVED_MASK)

/*
 * SNP_FEATURES_PRESENT is the mask of SNP features that are implemented
 * by the guest kernel. As and when a new feature is implemented in the
 * guest kernel, a corresponding bit should be added to the mask.
 */
#define SNP_FEATURES_PRESENT (0)
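
/*
 * For example, once the guest kernel gains support for a feature such as
 * Secure TSC, its bit (here MSR_AMD64_SNP_SECURE_TSC) would be OR-ed into
 * SNP_FEATURES_PRESENT so that snp_check_features() no longer treats it as
 * unsupported.
 */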

void snp_check_features(void)
{
	u64 unsupported;

	if (!(sev_status & MSR_AMD64_SEV_SNP_ENABLED))
		return;

	/*
	 * Terminate the boot if the hypervisor has enabled any feature lacking
	 * a guest-side implementation. Pass on the unsupported features mask
	 * through EXIT_INFO_2 of the GHCB protocol so that those features can
	 * be reported as part of the guest boot failure.
	 */
	unsupported = sev_status & SNP_FEATURES_IMPL_REQ & ~SNP_FEATURES_PRESENT;
	if (unsupported) {
		if (ghcb_version < 2 || (!boot_ghcb && !early_setup_ghcb()))
			sev_es_terminate(SEV_TERM_SET_GEN, GHCB_SNP_UNSUPPORTED);

		sev_es_ghcb_terminate(boot_ghcb, SEV_TERM_SET_GEN,
				      GHCB_SNP_UNSUPPORTED, unsupported);
	}
}
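
/*
 * sev_enable() performs the early SEV detection for the decompression stage:
 * preliminary SNP setup via the CC blob, the CPUID Fn8000_001F checks, reading
 * the SEV status MSR, GHCB protocol negotiation for SEV-ES, and the SNP
 * hypervisor-feature and VMPL0 checks, before setting sme_me_mask from the
 * C-bit position reported by CPUID.
 */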

void sev_enable(struct boot_params *bp)
{
	unsigned int eax, ebx, ecx, edx;
	struct msr m;
	bool snp;

	/*
	 * bp->cc_blob_address should only be set by boot/compressed kernel.
	 * Initialize it to 0 to ensure that uninitialized values from
	 * buggy bootloaders aren't propagated.
	 */
	if (bp)
		bp->cc_blob_address = 0;

	/*
	 * Setup/preliminary detection of SNP. This will be sanity-checked
	 * against CPUID/MSR values later.
	 */
	snp = snp_init(bp);

	/* Check for the SME/SEV support leaf */
	eax = 0x80000000;
	ecx = 0;
	native_cpuid(&eax, &ebx, &ecx, &edx);
	if (eax < 0x8000001f)
		return;

	/*
	 * Check for the SME/SEV feature:
	 *   CPUID Fn8000_001F[EAX]
	 *   - Bit 0 - Secure Memory Encryption support
	 *   - Bit 1 - Secure Encrypted Virtualization support
	 *   CPUID Fn8000_001F[EBX]
	 *   - Bits 5:0 - Pagetable bit position used to indicate encryption
	 */
	eax = 0x8000001f;
	ecx = 0;
	native_cpuid(&eax, &ebx, &ecx, &edx);
	/* Check whether SEV is supported */
	if (!(eax & BIT(1))) {
		if (snp)
			error("SEV-SNP support indicated by CC blob, but not CPUID.");
		return;
	}

	/* Set the SME mask if this is an SEV guest. */
	boot_rdmsr(MSR_AMD64_SEV, &m);
	sev_status = m.q;
	if (!(sev_status & MSR_AMD64_SEV_ENABLED))
		return;

	/* Negotiate the GHCB protocol version. */
	if (sev_status & MSR_AMD64_SEV_ES_ENABLED) {
		if (!sev_es_negotiate_protocol())
			sev_es_terminate(SEV_TERM_SET_GEN, GHCB_SEV_ES_PROT_UNSUPPORTED);
	}

	/*
	 * SNP is supported in v2 of the GHCB spec which mandates support for HV
	 * features.
	 */
	if (sev_status & MSR_AMD64_SEV_SNP_ENABLED) {
		if (!(get_hv_features() & GHCB_HV_FT_SNP))
			sev_es_terminate(SEV_TERM_SET_GEN, GHCB_SNP_UNSUPPORTED);

		enforce_vmpl0();
	}

	if (snp && !(sev_status & MSR_AMD64_SEV_SNP_ENABLED))
		error("SEV-SNP support indicated by CC blob, but not SEV status MSR.");

	sme_me_mask = BIT_ULL(ebx & 0x3f);
}
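
/*
 * struct cc_blob_sev_info is the SNP Confidential Computing blob; the fields
 * relied on below are ->magic (validated against CC_BLOB_SEV_HDR_MAGIC) and
 * ->cpuid_phys/->cpuid_len, which describe the SNP CPUID table that
 * sev_prep_identity_maps() later identity-maps.
 */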

/* Search for Confidential Computing blob in the EFI config table. */
static struct cc_blob_sev_info *find_cc_blob_efi(struct boot_params *bp)
{
	unsigned long cfg_table_pa;
	unsigned int cfg_table_len;
	int ret;

	ret = efi_get_conf_table(bp, &cfg_table_pa, &cfg_table_len);
	if (ret)
		return NULL;

	return (struct cc_blob_sev_info *)efi_find_vendor_table(bp, cfg_table_pa,
								 cfg_table_len,
								 EFI_CC_BLOB_GUID);
}

/*
 * Initial set up of SNP relies on information provided by the
 * Confidential Computing blob, which can be passed to the boot kernel
 * by firmware/bootloader in the following ways:
 *
 * - via an entry in the EFI config table
 * - via a setup_data structure, as defined by the Linux Boot Protocol
 *
 * Scan for the blob in that order.
 */
static struct cc_blob_sev_info *find_cc_blob(struct boot_params *bp)
{
	struct cc_blob_sev_info *cc_info;

	cc_info = find_cc_blob_efi(bp);
	if (cc_info)
		goto found_cc_info;

	cc_info = find_cc_blob_setup_data(bp);
	if (!cc_info)
		return NULL;

found_cc_info:
	if (cc_info->magic != CC_BLOB_SEV_HDR_MAGIC)
		sev_es_terminate(SEV_TERM_SET_GEN, GHCB_SNP_UNSUPPORTED);

	return cc_info;
}

/*
 * Indicate SNP based on presence of SNP-specific CC blob. Subsequent checks
 * will verify the SNP CPUID/MSR bits.
 */
bool snp_init(struct boot_params *bp)
{
	struct cc_blob_sev_info *cc_info;

	if (!bp)
		return false;

	cc_info = find_cc_blob(bp);
	if (!cc_info)
		return false;

	/*
	 * If a SNP-specific Confidential Computing blob is present, then
	 * firmware/bootloader have indicated SNP support. Verifying this
	 * involves CPUID checks which will be more reliable if the SNP
	 * CPUID table is used. See comments over snp_setup_cpuid_table() for
	 * more details.
	 */
	setup_cpuid_table(cc_info);

	/*
	 * Pass a pointer to the CC info to the run-time kernel via boot_params
	 * so the EFI config table doesn't need to be searched again during the
	 * early startup phase.
	 */
	bp->cc_blob_address = (u32)(unsigned long)cc_info;

	return true;
}

void sev_prep_identity_maps(unsigned long top_level_pgt)
{
	/*
	 * The Confidential Computing blob is used very early in the uncompressed
	 * kernel to find the in-memory CPUID table to handle CPUID
	 * instructions. Make sure an identity-mapping exists so it can be
	 * accessed after switchover.
	 */
	if (sev_snp_enabled()) {
		unsigned long cc_info_pa = boot_params->cc_blob_address;
		struct cc_blob_sev_info *cc_info;

		kernel_add_identity_map(cc_info_pa, cc_info_pa + sizeof(*cc_info));

		cc_info = (struct cc_blob_sev_info *)cc_info_pa;
		kernel_add_identity_map(cc_info->cpuid_phys, cc_info->cpuid_phys + cc_info->cpuid_len);
	}

	sev_verify_cbit(top_level_pgt);
}