xref: /openbmc/linux/arch/x86/xen/xen-asm.S (revision 4591766f)
1b2441318SGreg Kroah-Hartman/* SPDX-License-Identifier: GPL-2.0 */
25393744bSJeremy Fitzhardinge/*
3edcb5cf8SJuergen Gross * Asm versions of Xen pv-ops, suitable for direct use.
4130ace11STejun Heo *
5130ace11STejun Heo * We only bother with direct forms (ie, vcpu in percpu data) of the
6edcb5cf8SJuergen Gross * operations here; the indirect forms are better handled in C.
75393744bSJeremy Fitzhardinge */
85393744bSJeremy Fitzhardinge
956415c4cSJuergen Gross#include <asm/errno.h>
105393744bSJeremy Fitzhardinge#include <asm/asm-offsets.h>
115393744bSJeremy Fitzhardinge#include <asm/percpu.h>
125393744bSJeremy Fitzhardinge#include <asm/processor-flags.h>
1356415c4cSJuergen Gross#include <asm/segment.h>
1456415c4cSJuergen Gross#include <asm/thread_info.h>
1555aedddbSPeter Zijlstra#include <asm/asm.h>
1656415c4cSJuergen Gross#include <asm/frame.h>
17cde07a4eSJosh Poimboeuf#include <asm/unwind_hints.h>
185393744bSJeremy Fitzhardinge
1956415c4cSJuergen Gross#include <xen/interface/xen.h>
2056415c4cSJuergen Gross
2156415c4cSJuergen Gross#include <linux/init.h>
22edcb5cf8SJuergen Gross#include <linux/linkage.h>
235c8f6a2eSLai Jiangshan#include <../entry/calling.h>
245393744bSJeremy Fitzhardinge
2509c41307SPeter Zijlstra.pushsection .noinstr.text, "ax"
265393744bSJeremy Fitzhardinge/*
27130ace11STejun Heo * Disabling events is simply a matter of making the event mask
28130ace11STejun Heo * non-zero.
295393744bSJeremy Fitzhardinge */
306dcc5627SJiri SlabySYM_FUNC_START(xen_irq_disable_direct)
	/* A non-zero mask byte in this vCPU's vcpu_info means events are masked. */
315393744bSJeremy Fitzhardinge	movb $1, PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_mask
32f94909ceSPeter Zijlstra	RET
336dcc5627SJiri SlabySYM_FUNC_END(xen_irq_disable_direct)
345393744bSJeremy Fitzhardinge
355393744bSJeremy Fitzhardinge/*
36130ace11STejun Heo * Force an event check by making a hypercall, but preserve regs
37130ace11STejun Heo * before making the call.
385393744bSJeremy Fitzhardinge */
396dcc5627SJiri SlabySYM_FUNC_START(check_events)
408be0eb7eSJosh Poimboeuf	FRAME_BEGIN
	/*
	 * Save every caller-clobbered GPR: this is called from asm paths
	 * (e.g. xen_irq_enable_direct) that may have live values in any
	 * of them, and the C callee below follows the normal ABI.
	 */
415393744bSJeremy Fitzhardinge	push %rax
425393744bSJeremy Fitzhardinge	push %rcx
435393744bSJeremy Fitzhardinge	push %rdx
445393744bSJeremy Fitzhardinge	push %rsi
455393744bSJeremy Fitzhardinge	push %rdi
465393744bSJeremy Fitzhardinge	push %r8
475393744bSJeremy Fitzhardinge	push %r9
485393744bSJeremy Fitzhardinge	push %r10
495393744bSJeremy Fitzhardinge	push %r11
	/* C helper that performs the hypercall forcing an event check. */
505393744bSJeremy Fitzhardinge	call xen_force_evtchn_callback
515393744bSJeremy Fitzhardinge	pop %r11
525393744bSJeremy Fitzhardinge	pop %r10
535393744bSJeremy Fitzhardinge	pop %r9
545393744bSJeremy Fitzhardinge	pop %r8
555393744bSJeremy Fitzhardinge	pop %rdi
565393744bSJeremy Fitzhardinge	pop %rsi
575393744bSJeremy Fitzhardinge	pop %rdx
585393744bSJeremy Fitzhardinge	pop %rcx
595393744bSJeremy Fitzhardinge	pop %rax
608be0eb7eSJosh Poimboeuf	FRAME_END
61f94909ceSPeter Zijlstra	RET
626dcc5627SJiri SlabySYM_FUNC_END(check_events)
6355aedddbSPeter Zijlstra
64d7bfc7d5SPeter Zijlstra/*
65d7bfc7d5SPeter Zijlstra * Enable events.  This clears the event mask and then checks the
66d7bfc7d5SPeter Zijlstra * pending-event status.  If there are pending events, enter the
67d7bfc7d5SPeter Zijlstra * hypervisor to get them handled.
68d7bfc7d5SPeter Zijlstra */
69d7bfc7d5SPeter ZijlstraSYM_FUNC_START(xen_irq_enable_direct)
70d7bfc7d5SPeter Zijlstra	FRAME_BEGIN
71d7bfc7d5SPeter Zijlstra	/* Unmask events */
72d7bfc7d5SPeter Zijlstra	movb $0, PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_mask
73d7bfc7d5SPeter Zijlstra
74d7bfc7d5SPeter Zijlstra	/*
75d7bfc7d5SPeter Zijlstra	 * Preempt here doesn't matter because that will deal with any
76d7bfc7d5SPeter Zijlstra	 * pending interrupts.  The pending check may end up being run
77d7bfc7d5SPeter Zijlstra	 * on the wrong CPU, but that doesn't hurt.
78d7bfc7d5SPeter Zijlstra	 */
79d7bfc7d5SPeter Zijlstra
80d7bfc7d5SPeter Zijlstra	/* Test for pending */
81d7bfc7d5SPeter Zijlstra	testb $0xff, PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_pending
82d7bfc7d5SPeter Zijlstra	jz 1f
83d7bfc7d5SPeter Zijlstra
	/* check_events saves/restores all caller-clobbered regs (see above). */
84d7bfc7d5SPeter Zijlstra	call check_events
85d7bfc7d5SPeter Zijlstra1:
86d7bfc7d5SPeter Zijlstra	FRAME_END
87f94909ceSPeter Zijlstra	RET
88d7bfc7d5SPeter ZijlstraSYM_FUNC_END(xen_irq_enable_direct)
89d7bfc7d5SPeter Zijlstra
9020125c87SPeter Zijlstra/*
9120125c87SPeter Zijlstra * (xen_)save_fl is used to get the current interrupt enable status.
9220125c87SPeter Zijlstra * Callers expect the status to be in X86_EFLAGS_IF, and other bits
9320125c87SPeter Zijlstra * may be set in the return value.  We take advantage of this by
9420125c87SPeter Zijlstra * making sure that X86_EFLAGS_IF has the right value (and other bits
9520125c87SPeter Zijlstra * in that byte are 0), but other bits in the return value are
9620125c87SPeter Zijlstra * undefined.  We need to toggle the state of the bit, because Xen and
9720125c87SPeter Zijlstra * x86 use opposite senses (mask vs enable).
9820125c87SPeter Zijlstra */
9920125c87SPeter ZijlstraSYM_FUNC_START(xen_save_fl_direct)
	/* ZF is set iff the event mask byte is zero, i.e. events enabled. */
10020125c87SPeter Zijlstra	testb $0xff, PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_mask
	/* %ah = 1 if events enabled, 0 if masked (senses inverted vs Xen). */
10120125c87SPeter Zijlstra	setz %ah
	/*
	 * %ah occupies bits 8..15 of %eax, so doubling %ah to 2 yields
	 * 0x200 in %eax — exactly X86_EFLAGS_IF (bit 9), as documented above.
	 */
10220125c87SPeter Zijlstra	addb %ah, %ah
103f94909ceSPeter Zijlstra	RET
10420125c87SPeter ZijlstraSYM_FUNC_END(xen_save_fl_direct)
10520125c87SPeter Zijlstra
1066dcc5627SJiri SlabySYM_FUNC_START(xen_read_cr2)
10755aedddbSPeter Zijlstra	FRAME_BEGIN
	/* Load this CPU's vcpu_info pointer. */
10855aedddbSPeter Zijlstra	_ASM_MOV PER_CPU_VAR(xen_vcpu), %_ASM_AX
	/* Return the %cr2 value Xen recorded there (arch.cr2 field). */
10955aedddbSPeter Zijlstra	_ASM_MOV XEN_vcpu_info_arch_cr2(%_ASM_AX), %_ASM_AX
11055aedddbSPeter Zijlstra	FRAME_END
111f94909ceSPeter Zijlstra	RET
1126dcc5627SJiri SlabySYM_FUNC_END(xen_read_cr2);
11355aedddbSPeter Zijlstra
1146dcc5627SJiri SlabySYM_FUNC_START(xen_read_cr2_direct)
11555aedddbSPeter Zijlstra	FRAME_BEGIN
	/* Like xen_read_cr2, but reads the percpu vcpu_info copy directly. */
11655aedddbSPeter Zijlstra	_ASM_MOV PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_arch_cr2, %_ASM_AX
11755aedddbSPeter Zijlstra	FRAME_END
118f94909ceSPeter Zijlstra	RET
1196dcc5627SJiri SlabySYM_FUNC_END(xen_read_cr2_direct);
1200a53c9acSPeter Zijlstra.popsection
12156415c4cSJuergen Gross
12256415c4cSJuergen Gross.macro xen_pv_trap name
/*
 * Emit a Xen PV entry stub for one exception entry point.  Xen's
 * exception frame carries %rcx and %r11 on top of the standard iret
 * frame (see the syscall-frame comment further down); pop them back
 * so the native entry point sees the usual hardware-style frame.
 */
12356415c4cSJuergen GrossSYM_CODE_START(xen_\name)
124a09a6e23SPeter Zijlstra	UNWIND_HINT_ENTRY
1255b2fc515SPeter Zijlstra	ENDBR
12656415c4cSJuergen Gross	pop %rcx
12756415c4cSJuergen Gross	pop %r11
12856415c4cSJuergen Gross	jmp  \name
12956415c4cSJuergen GrossSYM_CODE_END(xen_\name)
13056415c4cSJuergen Gross_ASM_NOKPROBE(xen_\name)
13156415c4cSJuergen Gross.endm
13256415c4cSJuergen Gross
/* Instantiate one xen_<name> stub per PV-visible exception entry point. */
13356415c4cSJuergen Grossxen_pv_trap asm_exc_divide_error
13456415c4cSJuergen Grossxen_pv_trap asm_xenpv_exc_debug
13556415c4cSJuergen Grossxen_pv_trap asm_exc_int3
13656415c4cSJuergen Grossxen_pv_trap asm_xenpv_exc_nmi
13756415c4cSJuergen Grossxen_pv_trap asm_exc_overflow
13856415c4cSJuergen Grossxen_pv_trap asm_exc_bounds
13956415c4cSJuergen Grossxen_pv_trap asm_exc_invalid_op
14056415c4cSJuergen Grossxen_pv_trap asm_exc_device_not_available
1415b4c6d65SJuergen Grossxen_pv_trap asm_xenpv_exc_double_fault
14256415c4cSJuergen Grossxen_pv_trap asm_exc_coproc_segment_overrun
14356415c4cSJuergen Grossxen_pv_trap asm_exc_invalid_tss
14456415c4cSJuergen Grossxen_pv_trap asm_exc_segment_not_present
14556415c4cSJuergen Grossxen_pv_trap asm_exc_stack_segment
14656415c4cSJuergen Grossxen_pv_trap asm_exc_general_protection
14756415c4cSJuergen Grossxen_pv_trap asm_exc_page_fault
14856415c4cSJuergen Grossxen_pv_trap asm_exc_spurious_interrupt_bug
14956415c4cSJuergen Grossxen_pv_trap asm_exc_coprocessor_error
15056415c4cSJuergen Grossxen_pv_trap asm_exc_alignment_check
151a5f6c2acSRick Edgecombe#ifdef CONFIG_X86_CET
1525b2fc515SPeter Zijlstraxen_pv_trap asm_exc_control_protection
1535b2fc515SPeter Zijlstra#endif
15456415c4cSJuergen Gross#ifdef CONFIG_X86_MCE
155c3d7fa66SJuergen Grossxen_pv_trap asm_xenpv_exc_machine_check
15656415c4cSJuergen Gross#endif /* CONFIG_X86_MCE */
15756415c4cSJuergen Grossxen_pv_trap asm_exc_simd_coprocessor_error
15856415c4cSJuergen Gross#ifdef CONFIG_IA32_EMULATION
159*4591766fSThomas Gleixnerxen_pv_trap asm_int80_emulation
16056415c4cSJuergen Gross#endif
1612e924936SJuergen Grossxen_pv_trap asm_exc_xen_unknown_trap
16256415c4cSJuergen Grossxen_pv_trap asm_exc_xen_hypervisor_callback
16356415c4cSJuergen Gross
16456415c4cSJuergen Gross	__INIT
16556415c4cSJuergen GrossSYM_CODE_START(xen_early_idt_handler_array)
	/* One fixed-size stub per early exception vector. */
16656415c4cSJuergen Gross	i = 0
16756415c4cSJuergen Gross	.rept NUM_EXCEPTION_VECTORS
168fb799447SJosh Poimboeuf	UNWIND_HINT_UNDEFINED
1695b2fc515SPeter Zijlstra	ENDBR
	/* Drop the %rcx/%r11 Xen stacked, then use the generic early handler. */
17056415c4cSJuergen Gross	pop %rcx
17156415c4cSJuergen Gross	pop %r11
17256415c4cSJuergen Gross	jmp early_idt_handler_array + i*EARLY_IDT_HANDLER_SIZE
17356415c4cSJuergen Gross	i = i + 1
	/* Pad with int3 (0xcc) so every stub is exactly XEN_EARLY_IDT_HANDLER_SIZE. */
17456415c4cSJuergen Gross	.fill xen_early_idt_handler_array + i*XEN_EARLY_IDT_HANDLER_SIZE - ., 1, 0xcc
17556415c4cSJuergen Gross	.endr
17656415c4cSJuergen GrossSYM_CODE_END(xen_early_idt_handler_array)
17756415c4cSJuergen Gross	__FINIT
17856415c4cSJuergen Gross
/* Each hypercall page slot is 32 bytes; this is the address of the iret slot. */
17956415c4cSJuergen Grosshypercall_iret = hypercall_page + __HYPERVISOR_iret * 32
18056415c4cSJuergen Gross/*
18156415c4cSJuergen Gross * Xen64 iret frame:
18256415c4cSJuergen Gross *
18356415c4cSJuergen Gross *	ss
18456415c4cSJuergen Gross *	rsp
18556415c4cSJuergen Gross *	rflags
18656415c4cSJuergen Gross *	cs
18756415c4cSJuergen Gross *	rip		<-- standard iret frame
18856415c4cSJuergen Gross *
18956415c4cSJuergen Gross *	flags
19056415c4cSJuergen Gross *
19156415c4cSJuergen Gross *	rcx		}
19256415c4cSJuergen Gross *	r11		}<-- pushed by hypercall page
19356415c4cSJuergen Gross * rsp->rax		}
19456415c4cSJuergen Gross */
19556415c4cSJuergen GrossSYM_CODE_START(xen_iret)
196fb799447SJosh Poimboeuf	UNWIND_HINT_UNDEFINED
1978b87d8ceSPeter Zijlstra	ANNOTATE_NOENDBR
	/* Supply the "flags" word of the frame pictured above (no flags set). */
19856415c4cSJuergen Gross	pushq $0
19956415c4cSJuergen Gross	jmp hypercall_iret
20056415c4cSJuergen GrossSYM_CODE_END(xen_iret)
20156415c4cSJuergen Gross
20256415c4cSJuergen Gross/*
2035c8f6a2eSLai Jiangshan * XEN pv doesn't use trampoline stack, PER_CPU_VAR(cpu_tss_rw + TSS_sp0) is
2045c8f6a2eSLai Jiangshan * also the kernel stack.  Reusing swapgs_restore_regs_and_return_to_usermode()
2055c8f6a2eSLai Jiangshan * in XEN pv would cause %rsp to move up to the top of the kernel stack and
2065c8f6a2eSLai Jiangshan * leave the IRET frame below %rsp, which is dangerous to be corrupted if #NMI
2075c8f6a2eSLai Jiangshan * interrupts. And swapgs_restore_regs_and_return_to_usermode() pushing the IRET
2085c8f6a2eSLai Jiangshan * frame at the same address is useless.
2095c8f6a2eSLai Jiangshan */
2105c8f6a2eSLai JiangshanSYM_CODE_START(xenpv_restore_regs_and_return_to_usermode)
2115c8f6a2eSLai Jiangshan	UNWIND_HINT_REGS
2125c8f6a2eSLai Jiangshan	POP_REGS
	/* GPRs restored; regs->orig_ax plus the iret frame remain on the stack. */
2135c8f6a2eSLai Jiangshan
2145c8f6a2eSLai Jiangshan	/* stackleak_erase() can work safely on the kernel stack. */
2155c8f6a2eSLai Jiangshan	STACKLEAK_ERASE_NOCLOBBER
2165c8f6a2eSLai Jiangshan
2175c8f6a2eSLai Jiangshan	addq	$8, %rsp	/* skip regs->orig_ax */
	/* Return to user mode through Xen's iret hypercall, not native iret. */
2185c8f6a2eSLai Jiangshan	jmp xen_iret
2195c8f6a2eSLai JiangshanSYM_CODE_END(xenpv_restore_regs_and_return_to_usermode)
2205c8f6a2eSLai Jiangshan
2215c8f6a2eSLai Jiangshan/*
22256415c4cSJuergen Gross * Xen handles syscall callbacks much like ordinary exceptions, which
22356415c4cSJuergen Gross * means we have:
22456415c4cSJuergen Gross * - kernel gs
22556415c4cSJuergen Gross * - kernel rsp
22656415c4cSJuergen Gross * - an iret-like stack frame on the stack (including rcx and r11):
22756415c4cSJuergen Gross *	ss
22856415c4cSJuergen Gross *	rsp
22956415c4cSJuergen Gross *	rflags
23056415c4cSJuergen Gross *	cs
23156415c4cSJuergen Gross *	rip
23256415c4cSJuergen Gross *	r11
23356415c4cSJuergen Gross * rsp->rcx
23456415c4cSJuergen Gross */
23556415c4cSJuergen Gross
23656415c4cSJuergen Gross/* Normal 64-bit system call target */
237b75b7f8eSPeter ZijlstraSYM_CODE_START(xen_entry_SYSCALL_64)
238a09a6e23SPeter Zijlstra	UNWIND_HINT_ENTRY
2395b2fc515SPeter Zijlstra	ENDBR
	/* Load %rcx/%r11 from the Xen frame, leaving a standard iret frame. */
24056415c4cSJuergen Gross	popq %rcx
24156415c4cSJuergen Gross	popq %r11
24256415c4cSJuergen Gross
24356415c4cSJuergen Gross	/*
24456415c4cSJuergen Gross	 * Neither Xen nor the kernel really knows what the old SS and
24556415c4cSJuergen Gross	 * CS were.  The kernel expects __USER_DS and __USER_CS, so
24656415c4cSJuergen Gross	 * report those values even though Xen will guess its own values.
24756415c4cSJuergen Gross	 */
	/* 4*8(%rsp) is the SS slot, 1*8(%rsp) the CS slot of the iret frame. */
24856415c4cSJuergen Gross	movq $__USER_DS, 4*8(%rsp)
24956415c4cSJuergen Gross	movq $__USER_CS, 1*8(%rsp)
25056415c4cSJuergen Gross
25156415c4cSJuergen Gross	jmp entry_SYSCALL_64_after_hwframe
252b75b7f8eSPeter ZijlstraSYM_CODE_END(xen_entry_SYSCALL_64)
25356415c4cSJuergen Gross
25456415c4cSJuergen Gross#ifdef CONFIG_IA32_EMULATION
25556415c4cSJuergen Gross
25656415c4cSJuergen Gross/* 32-bit compat syscall target */
257b75b7f8eSPeter ZijlstraSYM_CODE_START(xen_entry_SYSCALL_compat)
258a09a6e23SPeter Zijlstra	UNWIND_HINT_ENTRY
2595b2fc515SPeter Zijlstra	ENDBR
	/* Load %rcx/%r11 from the Xen frame, leaving a standard iret frame. */
26056415c4cSJuergen Gross	popq %rcx
26156415c4cSJuergen Gross	popq %r11
26256415c4cSJuergen Gross
26356415c4cSJuergen Gross	/*
26456415c4cSJuergen Gross	 * Neither Xen nor the kernel really knows what the old SS and
265695c39bcSBrian Gerst	 * CS were.  The kernel expects __USER_DS and __USER32_CS, so
26656415c4cSJuergen Gross	 * report those values even though Xen will guess its own values.
26756415c4cSJuergen Gross	 */
	/* 4*8(%rsp) is the SS slot, 1*8(%rsp) the CS slot of the iret frame. */
268695c39bcSBrian Gerst	movq $__USER_DS, 4*8(%rsp)
26956415c4cSJuergen Gross	movq $__USER32_CS, 1*8(%rsp)
27056415c4cSJuergen Gross
27156415c4cSJuergen Gross	jmp entry_SYSCALL_compat_after_hwframe
272b75b7f8eSPeter ZijlstraSYM_CODE_END(xen_entry_SYSCALL_compat)
27356415c4cSJuergen Gross
27456415c4cSJuergen Gross/* 32-bit compat sysenter target */
275b75b7f8eSPeter ZijlstraSYM_CODE_START(xen_entry_SYSENTER_compat)
276a09a6e23SPeter Zijlstra	UNWIND_HINT_ENTRY
2775b2fc515SPeter Zijlstra	ENDBR
27856415c4cSJuergen Gross	/*
27956415c4cSJuergen Gross	 * NB: Xen is polite and clears TF from EFLAGS for us.  This means
28056415c4cSJuergen Gross	 * that we don't need to guard against single step exceptions here.
28156415c4cSJuergen Gross	 */
	/* Load %rcx/%r11 from the Xen frame, leaving a standard iret frame. */
28256415c4cSJuergen Gross	popq %rcx
28356415c4cSJuergen Gross	popq %r11
28456415c4cSJuergen Gross
28556415c4cSJuergen Gross	/*
28656415c4cSJuergen Gross	 * Neither Xen nor the kernel really knows what the old SS and
287695c39bcSBrian Gerst	 * CS were.  The kernel expects __USER_DS and __USER32_CS, so
28856415c4cSJuergen Gross	 * report those values even though Xen will guess its own values.
28956415c4cSJuergen Gross	 */
	/* 4*8(%rsp) is the SS slot, 1*8(%rsp) the CS slot of the iret frame. */
290695c39bcSBrian Gerst	movq $__USER_DS, 4*8(%rsp)
29156415c4cSJuergen Gross	movq $__USER32_CS, 1*8(%rsp)
29256415c4cSJuergen Gross
29356415c4cSJuergen Gross	jmp entry_SYSENTER_compat_after_hwframe
294b75b7f8eSPeter ZijlstraSYM_CODE_END(xen_entry_SYSENTER_compat)
29556415c4cSJuergen Gross
29656415c4cSJuergen Gross#else /* !CONFIG_IA32_EMULATION */
29756415c4cSJuergen Gross
298b75b7f8eSPeter ZijlstraSYM_CODE_START(xen_entry_SYSCALL_compat)
299b75b7f8eSPeter ZijlstraSYM_CODE_START(xen_entry_SYSENTER_compat)
300a09a6e23SPeter Zijlstra	UNWIND_HINT_ENTRY
3015b2fc515SPeter Zijlstra	ENDBR
	/* Without IA32 emulation, compat system calls just fail with -ENOSYS. */
30256415c4cSJuergen Gross	lea 16(%rsp), %rsp	/* strip %rcx, %r11 */
30356415c4cSJuergen Gross	mov $-ENOSYS, %rax
	/* Push the "flags" word, then return to the guest via the iret hypercall. */
30456415c4cSJuergen Gross	pushq $0
30556415c4cSJuergen Gross	jmp hypercall_iret
306b75b7f8eSPeter ZijlstraSYM_CODE_END(xen_entry_SYSENTER_compat)
307b75b7f8eSPeter ZijlstraSYM_CODE_END(xen_entry_SYSCALL_compat)
30856415c4cSJuergen Gross
30956415c4cSJuergen Gross#endif	/* CONFIG_IA32_EMULATION */
310