/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Asm versions of Xen pv-ops, suitable for direct use.
 *
 * We only bother with direct forms (i.e., vcpu info in percpu data) of
 * the operations here; the indirect forms are better handled in C.
 */

#include <asm/errno.h>
#include <asm/asm-offsets.h>
#include <asm/percpu.h>
#include <asm/processor-flags.h>
#include <asm/segment.h>
#include <asm/thread_info.h>
#include <asm/asm.h>
#include <asm/frame.h>

#include <xen/interface/xen.h>

#include <linux/init.h>
#include <linux/linkage.h>

/*
 * Enable events.  This clears the event mask and tests the pending
 * event status with one "and" (testb) operation.  If there are
 * pending events, then enter the hypervisor to get them handled.
 */
SYM_FUNC_START(xen_irq_enable_direct)
	FRAME_BEGIN
	/* Unmask events */
	movb $0, PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_mask

	/*
	 * Preemption here doesn't matter, because preemption itself
	 * will deal with any pending interrupts.  The pending check
	 * may end up being run on the wrong CPU, but that doesn't
	 * hurt.
	 */

	/* Test for pending */
	testb $0xff, PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_pending
	jz 1f

	call check_events
1:
	FRAME_END
	ret
SYM_FUNC_END(xen_irq_enable_direct)
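
/*
 * Roughly equivalent C, as an illustrative sketch only (the real
 * routine has to be asm: it is called through the pv-op machinery
 * and must not clobber registers):
 *
 *	void xen_irq_enable_direct(void)
 *	{
 *		struct vcpu_info *vi = this_cpu_ptr(&xen_vcpu_info);
 *
 *		vi->evtchn_upcall_mask = 0;	// unmask events
 *		barrier();
 *		if (vi->evtchn_upcall_pending)	// test for pending
 *			check_events();
 *	}
 *
 * (Field names per struct vcpu_info in xen/interface/xen.h.)
 */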


/*
 * Disabling events is simply a matter of making the event mask
 * non-zero.
 */
SYM_FUNC_START(xen_irq_disable_direct)
	movb $1, PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_mask
	ret
SYM_FUNC_END(xen_irq_disable_direct)
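
/*
 * In C this would simply be (illustrative sketch only):
 *
 *	void xen_irq_disable_direct(void)
 *	{
 *		this_cpu_write(xen_vcpu_info.evtchn_upcall_mask, 1);
 *	}
 */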

/*
 * (xen_)save_fl is used to get the current interrupt enable status.
 * Callers expect the status to be in X86_EFLAGS_IF, and other bits
 * may be set in the return value.  We take advantage of this by
 * making sure that X86_EFLAGS_IF has the right value (and other bits
 * in that byte are 0), but other bits in the return value are
 * undefined.  We need to toggle the state of the bit, because Xen and
 * x86 use opposite senses (mask vs enable).
 */
SYM_FUNC_START(xen_save_fl_direct)
	testb $0xff, PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_mask
	setz %ah		/* %ah = 1 iff events are unmasked */
	addb %ah, %ah		/* %ah bit 1 = %eax bit 9 = X86_EFLAGS_IF */
	ret
SYM_FUNC_END(xen_save_fl_direct)
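
/*
 * Roughly equivalent C (illustrative sketch only):
 *
 *	unsigned long xen_save_fl_direct(void)
 *	{
 *		return this_cpu_read(xen_vcpu_info.evtchn_upcall_mask)
 *			? 0 : X86_EFLAGS_IF;
 *	}
 */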


/*
 * In principle the caller should be passing us a value returned by
 * xen_save_fl_direct, but for robustness' sake we test only the
 * X86_EFLAGS_IF flag rather than the whole byte.  After setting the
 * interrupt mask state, the function checks for unmasked pending
 * events and, if there are any, enters the hypervisor to get them
 * delivered.
 */
SYM_FUNC_START(xen_restore_fl_direct)
	FRAME_BEGIN
	testw $X86_EFLAGS_IF, %di
	setz PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_mask	/* mask = !IF */
	/*
	 * Preemption here doesn't matter, because preemption itself
	 * will deal with any pending interrupts.  The pending check
	 * may end up being run on the wrong CPU, but that doesn't
	 * hurt.
	 */

	/*
	 * Check for unmasked and pending: the pending and mask bytes
	 * are adjacent in struct vcpu_info, so a single 16-bit compare
	 * against 0x0001 tests pending == 1 && mask == 0.
	 */
	cmpw $0x0001, PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_pending
	jnz 1f
	call check_events
1:
	FRAME_END
	ret
SYM_FUNC_END(xen_restore_fl_direct)
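
/*
 * Roughly equivalent C (illustrative sketch only; the asm above folds
 * the "pending && !mask" test into the single 16-bit compare):
 *
 *	void xen_restore_fl_direct(unsigned long flags)
 *	{
 *		struct vcpu_info *vi = this_cpu_ptr(&xen_vcpu_info);
 *
 *		vi->evtchn_upcall_mask = !(flags & X86_EFLAGS_IF);
 *		barrier();
 *		if (vi->evtchn_upcall_pending && !vi->evtchn_upcall_mask)
 *			check_events();
 *	}
 */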


/*
 * Force an event check by making a hypercall, but preserve regs
 * before making the call.  The asm callers above don't save any
 * registers themselves, so every register the C calling convention
 * allows to be clobbered is pushed and popped around the call.
 */
SYM_FUNC_START(check_events)
	FRAME_BEGIN
	push %rax
	push %rcx
	push %rdx
	push %rsi
	push %rdi
	push %r8
	push %r9
	push %r10
	push %r11
	call xen_force_evtchn_callback
	pop %r11
	pop %r10
	pop %r9
	pop %r8
	pop %rdi
	pop %rsi
	pop %rdx
	pop %rcx
	pop %rax
	FRAME_END
	ret
SYM_FUNC_END(check_events)

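/*
 * A Xen PV guest cannot read %cr2 directly; on a page fault Xen
 * supplies the faulting address in vcpu_info->arch.cr2, which is what
 * these two helpers fetch (via the xen_vcpu pointer, or directly from
 * the percpu copy).
 */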
SYM_FUNC_START(xen_read_cr2)
	FRAME_BEGIN
	_ASM_MOV PER_CPU_VAR(xen_vcpu), %_ASM_AX
	_ASM_MOV XEN_vcpu_info_arch_cr2(%_ASM_AX), %_ASM_AX
	FRAME_END
	ret
SYM_FUNC_END(xen_read_cr2)

SYM_FUNC_START(xen_read_cr2_direct)
	FRAME_BEGIN
	_ASM_MOV PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_arch_cr2, %_ASM_AX
	FRAME_END
	ret
SYM_FUNC_END(xen_read_cr2_direct)

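/*
 * Xen pushes %rcx and %r11 on top of the hardware iret frame when it
 * delivers an exception to a PV guest.  The generic asm_exc_* entry
 * points expect a bare hardware frame, so each stub discards those
 * two words before tail-calling the real handler.
 */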
.macro xen_pv_trap name
SYM_CODE_START(xen_\name)
	pop %rcx
	pop %r11
	jmp \name
SYM_CODE_END(xen_\name)
_ASM_NOKPROBE(xen_\name)
.endm

xen_pv_trap asm_exc_divide_error
xen_pv_trap asm_xenpv_exc_debug
xen_pv_trap asm_exc_int3
xen_pv_trap asm_xenpv_exc_nmi
xen_pv_trap asm_exc_overflow
xen_pv_trap asm_exc_bounds
xen_pv_trap asm_exc_invalid_op
xen_pv_trap asm_exc_device_not_available
xen_pv_trap asm_exc_double_fault
xen_pv_trap asm_exc_coproc_segment_overrun
xen_pv_trap asm_exc_invalid_tss
xen_pv_trap asm_exc_segment_not_present
xen_pv_trap asm_exc_stack_segment
xen_pv_trap asm_exc_general_protection
xen_pv_trap asm_exc_page_fault
xen_pv_trap asm_exc_spurious_interrupt_bug
xen_pv_trap asm_exc_coprocessor_error
xen_pv_trap asm_exc_alignment_check
#ifdef CONFIG_X86_MCE
xen_pv_trap asm_exc_machine_check
#endif /* CONFIG_X86_MCE */
xen_pv_trap asm_exc_simd_coprocessor_error
#ifdef CONFIG_IA32_EMULATION
xen_pv_trap entry_INT80_compat
#endif
xen_pv_trap asm_exc_xen_hypervisor_callback

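/*
 * Early IDT stubs: as in xen_pv_trap above, drop the %rcx/%r11 pair
 * pushed by Xen and tail-call the matching early_idt_handler_array
 * entry.  The .fill pads each stub to XEN_EARLY_IDT_HANDLER_SIZE
 * bytes so the stubs form a fixed-stride array.
 */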
	__INIT
SYM_CODE_START(xen_early_idt_handler_array)
	i = 0
	.rept NUM_EXCEPTION_VECTORS
	pop %rcx
	pop %r11
	jmp early_idt_handler_array + i*EARLY_IDT_HANDLER_SIZE
	i = i + 1
	.fill xen_early_idt_handler_array + i*XEN_EARLY_IDT_HANDLER_SIZE - ., 1, 0xcc
	.endr
SYM_CODE_END(xen_early_idt_handler_array)
	__FINIT

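/*
 * The hypercall page is an array of 32-byte stubs, one per hypercall
 * number, so the iret stub can be addressed and jumped to directly.
 */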
hypercall_iret = hypercall_page + __HYPERVISOR_iret * 32
/*
 * Xen64 iret frame:
 *
 *	ss
 *	rsp
 *	rflags
 *	cs
 *	rip		<-- standard iret frame
 *
 *	flags
 *
 *	rcx		}
 *	r11		}<-- pushed by hypercall page
 * rsp->rax		}
 */
SYM_CODE_START(xen_iret)
	pushq $0	/* the "flags" word: no VGCF_* flags set */
	jmp hypercall_iret
SYM_CODE_END(xen_iret)

SYM_CODE_START(xen_sysret64)
	/*
	 * We're already on the usermode stack at this point, but
	 * still with the kernel gs, so we can easily switch back.
	 *
	 * tss.sp2 is scratch space.
	 */
	movq %rsp, PER_CPU_VAR(cpu_tss_rw + TSS_sp2)
	movq PER_CPU_VAR(cpu_current_top_of_stack), %rsp

	pushq $__USER_DS
	pushq PER_CPU_VAR(cpu_tss_rw + TSS_sp2)
	pushq %r11
	pushq $__USER_CS
	pushq %rcx

	pushq $VGCF_in_syscall
	jmp hypercall_iret
SYM_CODE_END(xen_sysret64)

/*
 * Xen handles syscall callbacks much like ordinary exceptions, which
 * means we have:
 * - kernel gs
 * - kernel rsp
 * - an iret-like stack frame on the stack (including rcx and r11):
 *	ss
 *	rsp
 *	rflags
 *	cs
 *	rip
 *	r11
 * rsp->rcx
 */

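/*
 * Note on the offsets used below: once %rcx/%r11 have been popped,
 * the remaining frame has the hardware iret layout, i.e.
 * 0*8(%rsp) = rip, 1*8 = cs, 2*8 = rflags, 3*8 = rsp, 4*8 = ss,
 * which is why CS and SS are patched at 1*8 and 4*8 respectively.
 */
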
/* Normal 64-bit system call target */
SYM_FUNC_START(xen_syscall_target)
	popq %rcx
	popq %r11

	/*
	 * Neither Xen nor the kernel really knows what the old SS and
	 * CS were.  The kernel expects __USER_DS and __USER_CS, so
	 * report those values even though Xen will guess its own values.
	 */
	movq $__USER_DS, 4*8(%rsp)
	movq $__USER_CS, 1*8(%rsp)

	jmp entry_SYSCALL_64_after_hwframe
SYM_FUNC_END(xen_syscall_target)

#ifdef CONFIG_IA32_EMULATION

/* 32-bit compat syscall target */
SYM_FUNC_START(xen_syscall32_target)
	popq %rcx
	popq %r11

	/*
	 * Neither Xen nor the kernel really knows what the old SS and
	 * CS were.  The kernel expects __USER32_DS and __USER32_CS, so
	 * report those values even though Xen will guess its own values.
	 */
	movq $__USER32_DS, 4*8(%rsp)
	movq $__USER32_CS, 1*8(%rsp)

	jmp entry_SYSCALL_compat_after_hwframe
SYM_FUNC_END(xen_syscall32_target)

/* 32-bit compat sysenter target */
SYM_FUNC_START(xen_sysenter_target)
	/*
	 * NB: Xen is polite and clears TF from EFLAGS for us.  This means
	 * that we don't need to guard against single step exceptions here.
	 */
	popq %rcx
	popq %r11

	/*
	 * Neither Xen nor the kernel really knows what the old SS and
	 * CS were.  The kernel expects __USER32_DS and __USER32_CS, so
	 * report those values even though Xen will guess its own values.
	 */
	movq $__USER32_DS, 4*8(%rsp)
	movq $__USER32_CS, 1*8(%rsp)

	jmp entry_SYSENTER_compat_after_hwframe
SYM_FUNC_END(xen_sysenter_target)

#else /* !CONFIG_IA32_EMULATION */

SYM_FUNC_START_ALIAS(xen_syscall32_target)
SYM_FUNC_START(xen_sysenter_target)
	lea 16(%rsp), %rsp	/* strip %rcx, %r11 */
	mov $-ENOSYS, %rax
	pushq $0
	jmp hypercall_iret
SYM_FUNC_END(xen_sysenter_target)
SYM_FUNC_END_ALIAS(xen_syscall32_target)

#endif	/* CONFIG_IA32_EMULATION */