// SPDX-License-Identifier: GPL-2.0
/*
 * This file is not compiled stand-alone. It contains code shared
 * between the pre-decompression boot code and the running Linux kernel
 * and is included directly into both code-bases.
 */
/* I/O parameters for CPUID-related helpers */

 * Section 8.14.2.6. Also noted there is the SNP firmware-enforced limit
/*
 * These will be initialized based on CPUID table so that non-present
 * all-zero leaves (for sparse tables) can be differentiated from
 * invalid/out-of-range leaves. This is needed since all-zero leaves
 * still need to be post-processed.
 */
85 error("RDRAND instruction not supported - no trusted source of randomness available\n"); in sev_es_check_cpu_features()
/* in vc_ghcb_invalidate() */
	ghcb->save.sw_exit_code = 0;
	__builtin_memset(ghcb->save.valid_bitmap, 0, sizeof(ghcb->save.valid_bitmap));
/* in vc_init_em_ctxt() */
	ctxt->regs = regs;
/* in vc_finish_insn() */
	ctxt->regs->ip += ctxt->insn.length;
/* in verify_exception_info() */
	ret = ghcb->save.sw_exit_info_1 & GENMASK_ULL(31, 0);
	u64 info = ghcb->save.sw_exit_info_2;
	ctxt->fi.vector = v;
	ctxt->fi.error_code = info >> 32;
/* in sev_es_ghcb_hv_call() */
	ghcb->protocol_version = ghcb_version;
	ghcb->ghcb_usage = GHCB_DEFAULT_USAGE;
/* in __sev_cpuid_hv() */
	return -EIO;
/* in __sev_cpuid_hv_msr() */
	 * MSR protocol does not support fetching non-zero subfunctions, but is
	 * sufficient to handle current early-boot cases. Should that change,
	 * can be added here to use GHCB-page protocol for cases that occur late
	if (cpuid_function_is_indexed(leaf->fn) && leaf->subfn)
		return -EINVAL;

	ret = __sev_cpuid_hv(leaf->fn, GHCB_CPUID_REQ_EAX, &leaf->eax);
	ret = ret ? : __sev_cpuid_hv(leaf->fn, GHCB_CPUID_REQ_EBX, &leaf->ebx);
	ret = ret ? : __sev_cpuid_hv(leaf->fn, GHCB_CPUID_REQ_ECX, &leaf->ecx);
	ret = ret ? : __sev_cpuid_hv(leaf->fn, GHCB_CPUID_REQ_EDX, &leaf->edx);
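/*
 * A standalone sketch of the error-chaining idiom above: the GNU C "a ?: b"
 * extension evaluates to a when a is non-zero, so the first failing call
 * short-circuits the remaining fetches. fetch_reg() is a made-up stand-in
 * for the MSR-protocol CPUID helper, not the kernel function.
 */
#include <stdio.h>

static int fetch_reg(unsigned int fn, int which, unsigned int *out)
{
	if (which == 2)		/* pretend the ECX fetch fails */
		return -5;
	*out = fn + which;
	return 0;
}

int main(void)
{
	unsigned int a = 0, b = 0, c = 0, d = 0;
	int ret;

	ret = fetch_reg(0x8000001f, 0, &a);
	ret = ret ? : fetch_reg(0x8000001f, 1, &b);
	ret = ret ? : fetch_reg(0x8000001f, 2, &c);	/* fails, ret becomes -5 */
	ret = ret ? : fetch_reg(0x8000001f, 3, &d);	/* never evaluated */

	printf("ret=%d a=%#x b=%#x\n", ret, a, b);
	return 0;
}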
/* in __sev_cpuid_hv_ghcb() */
	ghcb_set_rax(ghcb, leaf->fn);
	ghcb_set_rcx(ghcb, leaf->subfn);

	/* xgetbv will cause #UD - use reset value for xcr0 */

	leaf->eax = ghcb->save.rax;
	leaf->ebx = ghcb->save.rbx;
	leaf->ecx = ghcb->save.rcx;
	leaf->edx = ghcb->save.rdx;
 * mapping. Use RIP-relative addressing to obtain the correct address
 * switch-over to kernel virtual addresses later.
/* in snp_cpuid_calc_xsave_size() */
	for (i = 0; i < cpuid_table->count; i++) {
		const struct snp_cpuid_fn *e = &cpuid_table->fn[i];

		if (!(e->eax_in == 0xD && e->ecx_in > 1 && e->ecx_in < 64))
		if (!(xfeatures_en & (BIT_ULL(e->ecx_in))))
		if (xfeatures_found & (BIT_ULL(e->ecx_in)))

		xfeatures_found |= (BIT_ULL(e->ecx_in));

		xsave_size += e->eax;
		xsave_size = max(xsave_size, e->eax + e->ebx);
	}
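/*
 * A user-space sketch of the size calculation above, assuming each table
 * entry carries the CPUID.0xD output for one extended state component:
 * eax = component size, ebx = non-compacted offset. The 0x240 base
 * (512-byte legacy area plus 64-byte XSAVE header), the sum for the
 * compacted format and the max(offset + size) for the standard format
 * mirror the lines above. Duplicate table entries are not handled here
 * (the kernel additionally tracks an xfeatures_found mask), and the
 * component values in main() are made up for illustration.
 */
#include <stdint.h>
#include <stdio.h>

struct xstate_comp {
	unsigned int ecx_in;	/* component number (bit in XCR0/XSS) */
	unsigned int eax;	/* size of the component in bytes */
	unsigned int ebx;	/* offset in the standard (non-compacted) layout */
};

static unsigned int calc_xsave_size(const struct xstate_comp *c, int n,
				    uint64_t xfeatures_en, int compacted)
{
	unsigned int size = 0x240;	/* legacy area + XSAVE header */
	int i;

	for (i = 0; i < n; i++) {
		if (c[i].ecx_in <= 1 || c[i].ecx_in >= 64)
			continue;
		if (!(xfeatures_en & (1ULL << c[i].ecx_in)))
			continue;

		if (compacted)
			size += c[i].eax;
		else if (c[i].eax + c[i].ebx > size)
			size = c[i].eax + c[i].ebx;
	}

	return size;
}

int main(void)
{
	/* hypothetical components 2 and 3, both enabled in xfeatures */
	struct xstate_comp comps[] = {
		{ .ecx_in = 2, .eax = 256, .ebx = 576 },
		{ .ecx_in = 3, .eax = 64,  .ebx = 960 },
	};

	printf("standard:  %u\n", calc_xsave_size(comps, 2, 0xfULL, 0));	/* 1024 */
	printf("compacted: %u\n", calc_xsave_size(comps, 2, 0xfULL, 1));	/* 896 */
	return 0;
}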
/* in snp_cpuid_get_validated_func() */
	for (i = 0; i < cpuid_table->count; i++) {
		const struct snp_cpuid_fn *e = &cpuid_table->fn[i];

		if (e->eax_in != leaf->fn)
		if (cpuid_function_is_indexed(leaf->fn) && e->ecx_in != leaf->subfn)
		if (e->eax_in == 0xD && (e->ecx_in == 0 || e->ecx_in == 1))
			if (!(e->xcr0_in == 1 || e->xcr0_in == 3) || e->xss_in)

		leaf->eax = e->eax;
		leaf->ebx = e->ebx;
		leaf->ecx = e->ecx;
		leaf->edx = e->edx;
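/*
 * A sketch of the lookup above: only "indexed" leaves (those whose output
 * depends on ECX) compare the subfunction; for every other leaf the first
 * entry with a matching EAX input wins. fn_is_indexed() and the table in
 * main() are simplified stand-ins, not the kernel's definitions.
 */
#include <stdbool.h>
#include <stdio.h>

struct cpuid_fn {
	unsigned int eax_in, ecx_in;
	unsigned int eax, ebx, ecx, edx;
};

static bool fn_is_indexed(unsigned int fn)
{
	/* simplified: in reality this is a per-leaf property of the CPUID spec */
	return fn == 0x7 || fn == 0xb || fn == 0xd;
}

static const struct cpuid_fn *lookup(const struct cpuid_fn *tbl, int n,
				     unsigned int fn, unsigned int subfn)
{
	for (int i = 0; i < n; i++) {
		if (tbl[i].eax_in != fn)
			continue;
		if (fn_is_indexed(fn) && tbl[i].ecx_in != subfn)
			continue;
		return &tbl[i];
	}
	return NULL;
}

int main(void)
{
	const struct cpuid_fn tbl[] = {
		{ .eax_in = 0x7, .ecx_in = 0, .ebx = 0x123 },
		{ .eax_in = 0x7, .ecx_in = 1, .eax = 0x456 },
	};
	const struct cpuid_fn *e = lookup(tbl, 2, 0x7, 1);

	printf("%#x\n", e ? e->eax : 0);	/* prints 0x456 */
	return 0;
}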
/* in snp_cpuid_postprocess() */
	switch (leaf->fn) {
		leaf->ebx = (leaf_hv.ebx & GENMASK(31, 24)) | (leaf->ebx & GENMASK(23, 0));
		leaf->edx = (leaf_hv.edx & BIT(9)) | (leaf->edx & ~BIT(9));
		leaf->ecx |= BIT(27);
		leaf->ecx &= ~BIT(4);
		leaf->ecx |= BIT(4);
		leaf->edx = leaf_hv.edx;
		if (leaf->subfn != 0 && leaf->subfn != 1)
		if (leaf->subfn == 1) {
			if (leaf->eax & BIT(3)) {
				unsigned long lo, hi;

				asm volatile("rdmsr" : "=a" (lo), "=d" (hi)
				xss = (hi << 32) | lo;
		 * bit 3) since SNP-capable hardware has these feature
		if (!(leaf->eax & (BIT(1) | BIT(3))))
			return -EINVAL;
		return -EINVAL;
		leaf->ebx = xsave_size;
		leaf->eax = leaf_hv.eax;
		leaf->ebx = (leaf->ebx & GENMASK(31, 8)) | (leaf_hv.ebx & GENMASK(7, 0));
		leaf->ecx = (leaf->ecx & GENMASK(31, 8)) | (leaf_hv.ecx & GENMASK(7, 0));
		/* No fix-ups needed, use values as-is. */
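/*
 * A sketch of the mask-splicing pattern used in the post-processing above:
 * take some bit fields from the hypervisor-reported value and the rest from
 * the validated table entry. GENMASK() is open-coded here for user space;
 * the example values are made up, and bits 31:24 of leaf 0x1 EBX are used
 * as the illustrative field (e.g. the initial APIC ID).
 */
#include <stdint.h>
#include <stdio.h>

#define GENMASK32(h, l)	(((~0u) >> (31 - (h))) & ((~0u) << (l)))

int main(void)
{
	uint32_t table_ebx = 0xAABBCCDD;	/* hypothetical validated table value */
	uint32_t hv_ebx    = 0x11223344;	/* hypothetical hypervisor value */

	/* bits 31:24 from the hypervisor, bits 23:0 from the table */
	uint32_t ebx = (hv_ebx & GENMASK32(31, 24)) | (table_ebx & GENMASK32(23, 0));

	printf("0x%08x\n", ebx);	/* prints 0x11bbccdd */
	return 0;
}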
 * Returns -EOPNOTSUPP if feature not enabled. Any other non-zero return value
/* in snp_cpuid() */
	if (!cpuid_table->count)
		return -EOPNOTSUPP;

	 * same as out-of-range values (all-zero). This is useful here
	 * out-of-range entries and in-range zero entries, since the
	 * CPU-specific information during post-processing. So if it's
	 * within a valid CPUID range, proceed with post-processing
	 * post-processing and just return zeros immediately.

	leaf->eax = leaf->ebx = leaf->ecx = leaf->edx = 0;

	/* Skip post-processing for out-of-range zero leaves. */
	if (!(leaf->fn <= RIP_REL_REF(cpuid_std_range_max) ||
	      (leaf->fn >= 0x40000000 && leaf->fn <= RIP_REL_REF(cpuid_hyp_range_max)) ||
	      (leaf->fn >= 0x80000000 && leaf->fn <= RIP_REL_REF(cpuid_ext_range_max))))
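/*
 * A sketch of the range filter above: leaves beyond the maxima advertised
 * by the CPUID table (standard 0x0000xxxx, hypervisor 0x4000xxxx, extended
 * 0x8000xxxx) are treated as out of range and returned as all-zero without
 * any post-processing. The maxima below are made-up values.
 */
#include <stdbool.h>
#include <stdio.h>

static unsigned int cpuid_std_range_max = 0x0000000d;
static unsigned int cpuid_hyp_range_max = 0x40000001;
static unsigned int cpuid_ext_range_max = 0x8000001f;

static bool cpuid_in_known_range(unsigned int fn)
{
	return fn <= cpuid_std_range_max ||
	       (fn >= 0x40000000 && fn <= cpuid_hyp_range_max) ||
	       (fn >= 0x80000000 && fn <= cpuid_ext_range_max);
}

int main(void)
{
	printf("%d %d %d\n",
	       cpuid_in_known_range(0x00000007),	/* 1: within the standard range */
	       cpuid_in_known_range(0x8000002f),	/* 0: beyond the extended max   */
	       cpuid_in_known_range(0x40000010));	/* 0: beyond the hypervisor max */
	return 0;
}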
 * Boot VC Handler - This is the first VC handler during boot, there is no GHCB
 * hypervisor and only the CPUID exit-code.
/* in do_vc_no_ghcb() */
	unsigned int subfn = lower_bits(regs->cx, 32);
	unsigned int fn = lower_bits(regs->ax, 32);

	if (ret != -EOPNOTSUPP)

	regs->ax = leaf.eax;
	regs->bx = leaf.ebx;
	regs->cx = leaf.ecx;
	regs->dx = leaf.edx;

	 * This is a VC handler and the #VC is only raised when SEV-ES is
	 * into the no-sev path. This could map sensitive data unencrypted and
	 *  - Availability of CPUID leaf 0x8000001f
	 *  - SEV CPUID bit.
	 * The hypervisor might still report the wrong C-bit position, but this

	if (fn == 0x80000000 && (regs->ax < 0x8000001f))
	else if ((fn == 0x8000001f && !(regs->ax & BIT(1))))

	/* Skip over the CPUID two-byte opcode */
	regs->ip += 2;
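/*
 * A sketch of the sanity check above: even though the hypervisor supplies
 * the CPUID results, the guest refuses values that would make it look like
 * a non-SEV system, since that would steer early boot onto unencrypted code
 * paths. The leaf and bit numbers follow the lines above; the helper name
 * is made up.
 */
#include <stdbool.h>
#include <stdio.h>

static bool hv_cpuid_claims_sev(unsigned int fn, unsigned int eax)
{
	if (fn == 0x80000000 && eax < 0x8000001f)
		return false;	/* SEV leaf 0x8000001f not even enumerable */
	if (fn == 0x8000001f && !(eax & (1u << 1)))
		return false;	/* SEV feature bit (EAX bit 1) clear */
	return true;
}

int main(void)
{
	printf("%d %d\n",
	       hv_cpuid_claims_sev(0x80000000, 0x8000001e),	/* 0: rejected */
	       hv_cpuid_claims_sev(0x8000001f, 0x2));		/* 1: accepted */
	return 0;
}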
/* in vc_insn_string_check() */
	if (user_mode(ctxt->regs) && fault_in_kernel_space(address)) {
		ctxt->fi.vector = X86_TRAP_PF;
		ctxt->fi.error_code = X86_PF_USER;
		ctxt->fi.cr2 = address;
		ctxt->fi.error_code |= X86_PF_WRITE;
/* in vc_insn_string_read() */
	int i, b = backwards ? -1 : 1;

/* in vc_insn_string_write() */
	int i, s = backwards ? -1 : 1;
/* in vc_ioio_exitinfo() */
	struct insn *insn = &ctxt->insn;

	switch (insn->opcode.bytes[0]) {
		port = ctxt->regs->dx & 0xffff;
		port = ctxt->regs->dx & 0xffff;
		port = (u8)insn->immediate.value & 0xffff;
		port = (u8)insn->immediate.value & 0xffff;
		port = ctxt->regs->dx & 0xffff;
		port = ctxt->regs->dx & 0xffff;

	switch (insn->opcode.bytes[0]) {
		*exitinfo |= (insn->opnd_bytes == 2) ? IOIO_DATA_16
		size = (insn->opnd_bytes == 2) ? 2 : 4;

	switch (insn->addr_bytes) {
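/*
 * A standalone sketch of the opcode decode above for the plain (non-string)
 * IN/OUT forms: 0xE4/0xE5 and 0xE6/0xE7 take an immediate 8-bit port,
 * 0xEC/0xED and 0xEE/0xEF take the port from DX, and even/odd opcodes
 * select the 8-bit vs. 16/32-bit operand size. The struct below is a
 * made-up container, not the GHCB IOIO exit-info encoding.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct ioio {
	uint16_t port;
	bool in;		/* true = IN (read from port) */
	bool imm_port;		/* port came from an immediate byte */
	int bytes;		/* 1, 2 or 4 */
};

static bool decode_ioio(uint8_t opcode, uint8_t imm, uint16_t dx,
			int opnd_bytes, struct ioio *out)
{
	switch (opcode) {
	case 0xe4: case 0xe5:	/* IN  al/eax, imm8 */
	case 0xe6: case 0xe7:	/* OUT imm8, al/eax */
		out->imm_port = true;
		out->port = imm;
		break;
	case 0xec: case 0xed:	/* IN  al/eax, dx */
	case 0xee: case 0xef:	/* OUT dx, al/eax */
		out->imm_port = false;
		out->port = dx;
		break;
	default:
		return false;	/* string forms (INS/OUTS) not handled here */
	}

	out->in = opcode < 0xe6 || opcode == 0xec || opcode == 0xed;
	out->bytes = (opcode & 1) ? ((opnd_bytes == 2) ? 2 : 4) : 1;
	return true;
}

int main(void)
{
	struct ioio io;

	if (decode_ioio(0xe5, 0x70, 0, 4, &io))	/* IN eax, 0x70 */
		printf("in=%d port=%#x bytes=%d\n", io.in, io.port, io.bytes);
	return 0;
}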
/* in vc_handle_ioio() */
	struct pt_regs *regs = ctxt->regs;

	bool df = ((regs->flags & X86_EFLAGS_DF) == X86_EFLAGS_DF);

	 * has a chance to take interrupts and re-schedule while the

	ghcb_count = sizeof(ghcb->shared_buffer) / io_bytes;
	op_count = (exit_info_1 & IOIO_REP) ? regs->cx : 1;
	es_base = insn_get_seg_base(ctxt->regs, INAT_SEG_REG_ES);

		(void *)(es_base + regs->si),
		ghcb->shared_buffer, io_bytes,

		(void *)(es_base + regs->di),
		ghcb->shared_buffer, io_bytes,

		regs->di -= exit_bytes;
		regs->di += exit_bytes;
		regs->si -= exit_bytes;
		regs->si += exit_bytes;

	regs->cx -= exit_info_2;
	ret = regs->cx ? ES_RETRY : ES_OK;

	rax = lower_bits(regs->ax, bits);
	regs->ax = lower_bits(ghcb->save.rax, bits);
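/*
 * A sketch of the REP INS/OUTS chunking above: at most one shared-buffer's
 * worth of elements is exchanged per exit, SI/DI are advanced (or rewound
 * when the direction flag is set), and the handler retries until RCX
 * reaches zero. The buffer size and element count below are made up, and
 * the actual copy plus VMGEXIT are elided.
 */
#include <stdint.h>
#include <stdio.h>

#define SHARED_BUF_SIZE	2032	/* hypothetical shared-buffer size in bytes */

static void rep_outs_chunks(uint64_t *rsi, uint64_t *rcx, int io_bytes, int df)
{
	uint64_t ghcb_count = SHARED_BUF_SIZE / io_bytes;
	int round = 0;

	while (*rcx) {
		uint64_t count = *rcx < ghcb_count ? *rcx : ghcb_count;
		uint64_t bytes = count * io_bytes;

		/* ...copy "count" elements into the shared buffer and exit here... */

		if (df)
			*rsi -= bytes;
		else
			*rsi += bytes;
		*rcx -= count;

		printf("round %d: %llu elements, rcx now %llu\n", ++round,
		       (unsigned long long)count, (unsigned long long)*rcx);
	}
}

int main(void)
{
	uint64_t rsi = 0x1000, rcx = 1000;

	rep_outs_chunks(&rsi, &rcx, 4, 0);	/* 1000 dwords, up to 508 per round */
	return 0;
}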
/* in vc_handle_cpuid_snp() */
	struct pt_regs *regs = ctxt->regs;

	leaf.fn = regs->ax;
	leaf.subfn = regs->cx;

	regs->ax = leaf.eax;
	regs->bx = leaf.ebx;
	regs->cx = leaf.ecx;
	regs->dx = leaf.edx;
/* in vc_handle_cpuid() */
	struct pt_regs *regs = ctxt->regs;

	if (snp_cpuid_ret != -EOPNOTSUPP)

	ghcb_set_rax(ghcb, regs->ax);
	ghcb_set_rcx(ghcb, regs->cx);

	/* xgetbv will cause #GP - use reset value for xcr0 */

	regs->ax = ghcb->save.rax;
	regs->bx = ghcb->save.rbx;
	regs->cx = ghcb->save.rcx;
	regs->dx = ghcb->save.rdx;
/* in vc_handle_rdtsc() */
	ctxt->regs->ax = ghcb->save.rax;
	ctxt->regs->dx = ghcb->save.rdx;
	ctxt->regs->cx = ghcb->save.rcx;
/* in find_cc_blob_setup_data() */
	hdr = (struct setup_data *)bp->hdr.setup_data;

	if (hdr->type == SETUP_CC_BLOB) {
		return (struct cc_blob_sev_info *)(unsigned long)sd->cc_blob_address;
	hdr = (struct setup_data *)hdr->next;
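/*
 * A sketch of the setup_data walk above: boot_params carries a singly
 * linked list of setup_data blobs chained through physical addresses, and
 * the CC blob is found by matching its type field. The structure and the
 * SETUP_CC_BLOB value below are trimmed-down stand-ins mirroring the
 * bootparam ABI, and "physical address" is just a pointer value here.
 */
#include <stdint.h>
#include <stdio.h>

struct setup_data {
	uint64_t next;		/* physical address of the next node (0 = end) */
	uint32_t type;
	uint32_t len;
};

#define SETUP_CC_BLOB	7	/* type value assumed for the CC blob entry */

static struct setup_data *find_setup_data(uint64_t head, uint32_t type)
{
	struct setup_data *sd = (struct setup_data *)(uintptr_t)head;

	while (sd) {
		if (sd->type == type)
			return sd;
		sd = (struct setup_data *)(uintptr_t)sd->next;
	}
	return NULL;
}

int main(void)
{
	struct setup_data cc   = { .next = 0, .type = SETUP_CC_BLOB, .len = 0 };
	struct setup_data head = { .next = (uint64_t)(uintptr_t)&cc, .type = 1, .len = 0 };
	struct setup_data *found = find_setup_data((uint64_t)(uintptr_t)&head, SETUP_CC_BLOB);

	printf("found type %u\n", found ? found->type : 0);	/* prints 7 */
	return 0;
}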
/* in setup_cpuid_table() */
	if (!cc_info || !cc_info->cpuid_phys || cc_info->cpuid_len < PAGE_SIZE)

	cpuid_table_fw = (const struct snp_cpuid_table *)cc_info->cpuid_phys;
	if (!cpuid_table_fw->count || cpuid_table_fw->count > SNP_CPUID_COUNT_MAX)

	/* Initialize CPUID ranges for range-checking. */
	for (i = 0; i < cpuid_table->count; i++) {
		const struct snp_cpuid_fn *fn = &cpuid_table->fn[i];

		if (fn->eax_in == 0x0)
			RIP_REL_REF(cpuid_std_range_max) = fn->eax;
		else if (fn->eax_in == 0x40000000)
			RIP_REL_REF(cpuid_hyp_range_max) = fn->eax;
		else if (fn->eax_in == 0x80000000)
			RIP_REL_REF(cpuid_ext_range_max) = fn->eax;
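/*
 * A sketch of the range initialization above, the counterpart of the range
 * check sketched after snp_cpuid(): leaves 0x0, 0x40000000 and 0x80000000
 * each report the highest valid leaf of their range in EAX, so recording
 * those three EAX values is all the later filter needs. The table contents
 * below are hypothetical.
 */
#include <stdio.h>

struct cpuid_fn {
	unsigned int eax_in;
	unsigned int eax;
};

static unsigned int std_max, hyp_max, ext_max;

static void init_ranges(const struct cpuid_fn *tbl, int count)
{
	for (int i = 0; i < count; i++) {
		if (tbl[i].eax_in == 0x0)
			std_max = tbl[i].eax;
		else if (tbl[i].eax_in == 0x40000000)
			hyp_max = tbl[i].eax;
		else if (tbl[i].eax_in == 0x80000000)
			ext_max = tbl[i].eax;
	}
}

int main(void)
{
	const struct cpuid_fn tbl[] = {
		{ 0x00000000, 0x0000000d },
		{ 0x40000000, 0x40000001 },
		{ 0x80000000, 0x8000001f },
	};

	init_ranges(tbl, 3);
	printf("%#x %#x %#x\n", std_max, hyp_max, ext_max);
	return 0;
}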
/* in pvalidate_pages() */
	for (i = 0; i <= desc->hdr.end_entry; i++) {
		e = &desc->entries[i];

		vaddr = (unsigned long)pfn_to_kaddr(e->gfn);
		size = e->pagesize ? RMP_PG_SIZE_2M : RMP_PG_SIZE_4K;
		validate = e->operation == SNP_PAGE_STATE_PRIVATE;
/* in vmgexit_psc() */
	data = (struct snp_psc_desc *)ghcb->shared_buffer;
	memcpy(ghcb->shared_buffer, desc, min_t(int, GHCB_SHARED_BUF_SIZE, sizeof(*desc)));

	 * reference the data->hdr everywhere.

	cur_entry = data->hdr.cur_entry;
	end_entry = data->hdr.end_entry;

	while (data->hdr.cur_entry <= data->hdr.end_entry) {
		if (WARN(ret || ghcb->save.sw_exit_info_2,
			 ret, ghcb->save.sw_exit_info_2)) {

		if (WARN(data->hdr.reserved, "Reserved bit is set in the PSC header\n")) {

		if (WARN(data->hdr.end_entry > end_entry || cur_entry > data->hdr.cur_entry,
			 end_entry, data->hdr.end_entry, cur_entry, data->hdr.cur_entry)) {