/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Asm versions of Xen pv-ops, suitable for direct use.
 *
 * We only bother with direct forms (ie, vcpu in percpu data) of the
 * operations here; the indirect forms are better handled in C.
 */

#include <asm/errno.h>
#include <asm/asm-offsets.h>
#include <asm/percpu.h>
#include <asm/processor-flags.h>
#include <asm/segment.h>
#include <asm/thread_info.h>
#include <asm/asm.h>
#include <asm/frame.h>
#include <asm/unwind_hints.h>

#include <xen/interface/xen.h>

#include <linux/init.h>
#include <linux/linkage.h>
#include <../entry/calling.h>

.pushsection .noinstr.text, "ax"
/*
 * Disabling events is simply a matter of making the event mask
 * non-zero.
 *
 * NOTE: the Xen event mask has the opposite sense of X86_EFLAGS_IF:
 * a non-zero mask means events (virtual interrupts) are blocked.
 */
SYM_FUNC_START(xen_irq_disable_direct)
	movb $1, PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_mask
	RET
SYM_FUNC_END(xen_irq_disable_direct)

/*
 * Force an event check by making a hypercall, but preserve regs
 * before making the call.
 *
 * All SysV call-clobbered registers (%rax, %rcx, %rdx, %rsi, %rdi,
 * %r8-%r11) are saved around the C call so that this function
 * clobbers nothing from its caller's point of view.
 */
SYM_FUNC_START(check_events)
	FRAME_BEGIN
	push %rax
	push %rcx
	push %rdx
	push %rsi
	push %rdi
	push %r8
	push %r9
	push %r10
	push %r11
	call xen_force_evtchn_callback
	pop %r11
	pop %r10
	pop %r9
	pop %r8
	pop %rdi
	pop %rsi
	pop %rdx
	pop %rcx
	pop %rax
	FRAME_END
	RET
SYM_FUNC_END(check_events)

/*
 * Enable events.  This clears the event mask and tests the pending
 * event status with one and operation.  If there are pending events,
 * then enter the hypervisor to get them handled.
 */
SYM_FUNC_START(xen_irq_enable_direct)
	FRAME_BEGIN
	/* Unmask events */
	movb $0, PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_mask

	/*
	 * Preempt here doesn't matter because that will deal with any
	 * pending interrupts.  The pending check may end up being run
	 * on the wrong CPU, but that doesn't hurt.
	 */

	/* Test for pending */
	testb $0xff, PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_pending
	jz 1f

	call check_events
1:
	FRAME_END
	RET
SYM_FUNC_END(xen_irq_enable_direct)

/*
 * (xen_)save_fl is used to get the current interrupt enable status.
 * Callers expect the status to be in X86_EFLAGS_IF, and other bits
 * may be set in the return value.  We take advantage of this by
 * making sure that X86_EFLAGS_IF has the right value (and other bits
 * in that byte are 0), but other bits in the return value are
 * undefined.  We need to toggle the state of the bit, because Xen and
 * x86 use opposite senses (mask vs enable).
 */
SYM_FUNC_START(xen_save_fl_direct)
	testb $0xff, PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_mask
	/* %ah = 1 iff the mask byte is zero, i.e. events are enabled */
	setz %ah
	/*
	 * %ah occupies bits 8-15 of %rax, so %ah = 1 is bit 8 (0x100);
	 * doubling it moves the flag to bit 9 = X86_EFLAGS_IF (0x200).
	 */
	addb %ah, %ah
	RET
SYM_FUNC_END(xen_save_fl_direct)

/*
 * Read the faulting address that Xen saved in the vcpu_info
 * structure (arch.cr2), via the xen_vcpu pointer.
 */
SYM_FUNC_START(xen_read_cr2)
	FRAME_BEGIN
	_ASM_MOV PER_CPU_VAR(xen_vcpu), %_ASM_AX
	_ASM_MOV XEN_vcpu_info_arch_cr2(%_ASM_AX), %_ASM_AX
	FRAME_END
	RET
SYM_FUNC_END(xen_read_cr2);

/* As xen_read_cr2, but using the percpu vcpu_info copy directly. */
SYM_FUNC_START(xen_read_cr2_direct)
	FRAME_BEGIN
	_ASM_MOV PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_arch_cr2, %_ASM_AX
	FRAME_END
	RET
SYM_FUNC_END(xen_read_cr2_direct);
.popsection

/*
 * Entry stub for an exception vector under Xen PV: Xen pushes %rcx
 * and %r11 on top of the iret frame (see the frame layout comment
 * near xen_iret below); pop them so the stack matches what the
 * native handler \name expects, then tail-jump to it.
 */
.macro xen_pv_trap name
SYM_CODE_START(xen_\name)
	UNWIND_HINT_EMPTY
	pop %rcx
	pop %r11
	jmp \name
SYM_CODE_END(xen_\name)
_ASM_NOKPROBE(xen_\name)
.endm

xen_pv_trap asm_exc_divide_error
xen_pv_trap asm_xenpv_exc_debug
xen_pv_trap asm_exc_int3
xen_pv_trap asm_xenpv_exc_nmi
xen_pv_trap asm_exc_overflow
xen_pv_trap asm_exc_bounds
xen_pv_trap asm_exc_invalid_op
xen_pv_trap asm_exc_device_not_available
xen_pv_trap asm_xenpv_exc_double_fault
xen_pv_trap asm_exc_coproc_segment_overrun
xen_pv_trap asm_exc_invalid_tss
xen_pv_trap asm_exc_segment_not_present
xen_pv_trap asm_exc_stack_segment
xen_pv_trap asm_exc_general_protection
xen_pv_trap asm_exc_page_fault
xen_pv_trap asm_exc_spurious_interrupt_bug
xen_pv_trap asm_exc_coprocessor_error
xen_pv_trap asm_exc_alignment_check
#ifdef CONFIG_X86_MCE
xen_pv_trap asm_xenpv_exc_machine_check
#endif /* CONFIG_X86_MCE */
xen_pv_trap asm_exc_simd_coprocessor_error
#ifdef CONFIG_IA32_EMULATION
xen_pv_trap entry_INT80_compat
#endif
xen_pv_trap asm_exc_xen_unknown_trap
xen_pv_trap asm_exc_xen_hypervisor_callback

/*
 * Early-boot exception stubs: like xen_pv_trap, strip the %rcx/%r11
 * that Xen pushed and forward to the corresponding slot of
 * early_idt_handler_array.  Each stub is padded with 0xcc (int3) up
 * to XEN_EARLY_IDT_HANDLER_SIZE so the array can be indexed by
 * vector number.
 */
	__INIT
SYM_CODE_START(xen_early_idt_handler_array)
	i = 0
	.rept NUM_EXCEPTION_VECTORS
	UNWIND_HINT_EMPTY
	pop %rcx
	pop %r11
	jmp early_idt_handler_array + i*EARLY_IDT_HANDLER_SIZE
	i = i + 1
	.fill xen_early_idt_handler_array + i*XEN_EARLY_IDT_HANDLER_SIZE - ., 1, 0xcc
	.endr
SYM_CODE_END(xen_early_idt_handler_array)
	__FINIT

/* Each hypercall occupies a 32-byte slot in the hypercall page. */
hypercall_iret = hypercall_page + __HYPERVISOR_iret * 32
/*
 * Xen64 iret frame:
 *
 *	ss
 *	rsp
 *	rflags
 *	cs
 *	rip		<-- standard iret frame
 *
 *	flags
 *
 *	rcx		}
 *	r11		}<-- pushed by hypercall page
 * rsp->rax		}
 */
SYM_CODE_START(xen_iret)
	UNWIND_HINT_EMPTY
	ANNOTATE_NOENDBR
	/* The zero fills the "flags" slot of the frame shown above. */
	pushq $0
	jmp hypercall_iret
SYM_CODE_END(xen_iret)

/*
 * XEN pv doesn't use trampoline stack, PER_CPU_VAR(cpu_tss_rw + TSS_sp0) is
 * also the kernel stack.  Reusing swapgs_restore_regs_and_return_to_usermode()
 * in XEN pv would cause %rsp to move up to the top of the kernel stack and
 * leave the IRET frame below %rsp, which is dangerous to be corrupted if #NMI
 * interrupts.  And swapgs_restore_regs_and_return_to_usermode() pushing the IRET
 * frame at the same address is useless.
 */
SYM_CODE_START(xenpv_restore_regs_and_return_to_usermode)
	UNWIND_HINT_REGS
	POP_REGS

	/* stackleak_erase() can work safely on the kernel stack. */
	STACKLEAK_ERASE_NOCLOBBER

	addq $8, %rsp	/* skip regs->orig_ax */
	jmp xen_iret
SYM_CODE_END(xenpv_restore_regs_and_return_to_usermode)

/*
 * Xen handles syscall callbacks much like ordinary exceptions, which
 * means we have:
 * - kernel gs
 * - kernel rsp
 * - an iret-like stack frame on the stack (including rcx and r11):
 *	ss
 *	rsp
 *	rflags
 *	cs
 *	rip
 *	r11
 * rsp->rcx
 */

/* Normal 64-bit system call target */
SYM_CODE_START(xen_syscall_target)
	UNWIND_HINT_EMPTY
	/* Restore the SYSCALL-clobber registers Xen stacked (frame above). */
	popq %rcx
	popq %r11

	/*
	 * Neither Xen nor the kernel really knows what the old SS and
	 * CS were.  The kernel expects __USER_DS and __USER_CS, so
	 * report those values even though Xen will guess its own values.
	 */
	movq $__USER_DS, 4*8(%rsp)	/* overwrite the frame's ss slot */
	movq $__USER_CS, 1*8(%rsp)	/* overwrite the frame's cs slot */

	jmp entry_SYSCALL_64_after_hwframe
SYM_CODE_END(xen_syscall_target)

#ifdef CONFIG_IA32_EMULATION

/* 32-bit compat syscall target */
SYM_CODE_START(xen_syscall32_target)
	UNWIND_HINT_EMPTY
	popq %rcx
	popq %r11

	/*
	 * Neither Xen nor the kernel really knows what the old SS and
	 * CS were.  The kernel expects __USER32_DS and __USER32_CS, so
	 * report those values even though Xen will guess its own values.
	 */
	movq $__USER32_DS, 4*8(%rsp)
	movq $__USER32_CS, 1*8(%rsp)

	jmp entry_SYSCALL_compat_after_hwframe
SYM_CODE_END(xen_syscall32_target)

/* 32-bit compat sysenter target */
SYM_CODE_START(xen_sysenter_target)
	UNWIND_HINT_EMPTY
	/*
	 * NB: Xen is polite and clears TF from EFLAGS for us.  This means
	 * that we don't need to guard against single step exceptions here.
	 */
	popq %rcx
	popq %r11

	/*
	 * Neither Xen nor the kernel really knows what the old SS and
	 * CS were.  The kernel expects __USER32_DS and __USER32_CS, so
	 * report those values even though Xen will guess its own values.
	 */
	movq $__USER32_DS, 4*8(%rsp)
	movq $__USER32_CS, 1*8(%rsp)

	jmp entry_SYSENTER_compat_after_hwframe
SYM_CODE_END(xen_sysenter_target)

#else /* !CONFIG_IA32_EMULATION */

/*
 * Without IA32 emulation the compat entry points just fail the call:
 * drop the %rcx/%r11 Xen pushed, return -ENOSYS in %rax, and iret
 * back via the hypervisor (pushq $0 fills the Xen iret frame's
 * "flags" slot, as in xen_iret).
 */
SYM_CODE_START(xen_syscall32_target)
SYM_CODE_START(xen_sysenter_target)
	UNWIND_HINT_EMPTY
	lea 16(%rsp), %rsp	/* strip %rcx, %r11 */
	mov $-ENOSYS, %rax
	pushq $0
	jmp hypercall_iret
SYM_CODE_END(xen_sysenter_target)
SYM_CODE_END(xen_syscall32_target)

#endif /* CONFIG_IA32_EMULATION */