xref: /openbmc/linux/arch/x86/xen/xen-asm.S (revision d7bfc7d5)
1b2441318SGreg Kroah-Hartman/* SPDX-License-Identifier: GPL-2.0 */
25393744bSJeremy Fitzhardinge/*
3edcb5cf8SJuergen Gross * Asm versions of Xen pv-ops, suitable for direct use.
4130ace11STejun Heo *
5130ace11STejun Heo * We only bother with direct forms (ie, vcpu in percpu data) of the
6edcb5cf8SJuergen Gross * operations here; the indirect forms are better handled in C.
75393744bSJeremy Fitzhardinge */
85393744bSJeremy Fitzhardinge
956415c4cSJuergen Gross#include <asm/errno.h>
105393744bSJeremy Fitzhardinge#include <asm/asm-offsets.h>
115393744bSJeremy Fitzhardinge#include <asm/percpu.h>
125393744bSJeremy Fitzhardinge#include <asm/processor-flags.h>
1356415c4cSJuergen Gross#include <asm/segment.h>
1456415c4cSJuergen Gross#include <asm/thread_info.h>
1555aedddbSPeter Zijlstra#include <asm/asm.h>
1656415c4cSJuergen Gross#include <asm/frame.h>
17cde07a4eSJosh Poimboeuf#include <asm/unwind_hints.h>
185393744bSJeremy Fitzhardinge
1956415c4cSJuergen Gross#include <xen/interface/xen.h>
2056415c4cSJuergen Gross
2156415c4cSJuergen Gross#include <linux/init.h>
22edcb5cf8SJuergen Gross#include <linux/linkage.h>
235393744bSJeremy Fitzhardinge
245393744bSJeremy Fitzhardinge/*
25130ace11STejun Heo * Disabling events is simply a matter of making the event mask
26130ace11STejun Heo * non-zero.
275393744bSJeremy Fitzhardinge */
286dcc5627SJiri SlabySYM_FUNC_START(xen_irq_disable_direct)
	/*
	 * Write a non-zero value into this CPU's vcpu_info event mask
	 * byte; any non-zero value masks event delivery, so no
	 * hypercall is needed.
	 */
295393744bSJeremy Fitzhardinge	movb $1, PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_mask
305393744bSJeremy Fitzhardinge	ret
316dcc5627SJiri SlabySYM_FUNC_END(xen_irq_disable_direct)
325393744bSJeremy Fitzhardinge
33*d7bfc7d5SPeter Zijlstra.pushsection .noinstr.text, "ax"
34*d7bfc7d5SPeter Zijlstra
355393744bSJeremy Fitzhardinge/*
36130ace11STejun Heo * Force an event check by making a hypercall, but preserve regs
37130ace11STejun Heo * before making the call.
385393744bSJeremy Fitzhardinge */
396dcc5627SJiri SlabySYM_FUNC_START(check_events)
408be0eb7eSJosh Poimboeuf	FRAME_BEGIN
	/*
	 * xen_force_evtchn_callback is a C function; save every
	 * SysV caller-clobbered GPR around the call so that, as the
	 * header comment says, this helper preserves all registers
	 * (only flags and the stack are touched).
	 */
415393744bSJeremy Fitzhardinge	push %rax
425393744bSJeremy Fitzhardinge	push %rcx
435393744bSJeremy Fitzhardinge	push %rdx
445393744bSJeremy Fitzhardinge	push %rsi
455393744bSJeremy Fitzhardinge	push %rdi
465393744bSJeremy Fitzhardinge	push %r8
475393744bSJeremy Fitzhardinge	push %r9
485393744bSJeremy Fitzhardinge	push %r10
495393744bSJeremy Fitzhardinge	push %r11
505393744bSJeremy Fitzhardinge	call xen_force_evtchn_callback
	/* Restore in reverse push order. */
515393744bSJeremy Fitzhardinge	pop %r11
525393744bSJeremy Fitzhardinge	pop %r10
535393744bSJeremy Fitzhardinge	pop %r9
545393744bSJeremy Fitzhardinge	pop %r8
555393744bSJeremy Fitzhardinge	pop %rdi
565393744bSJeremy Fitzhardinge	pop %rsi
575393744bSJeremy Fitzhardinge	pop %rdx
585393744bSJeremy Fitzhardinge	pop %rcx
595393744bSJeremy Fitzhardinge	pop %rax
608be0eb7eSJosh Poimboeuf	FRAME_END
615393744bSJeremy Fitzhardinge	ret
626dcc5627SJiri SlabySYM_FUNC_END(check_events)
6355aedddbSPeter Zijlstra
64*d7bfc7d5SPeter Zijlstra/*
65*d7bfc7d5SPeter Zijlstra * Enable events.  This clears the event mask and tests the pending
66*d7bfc7d5SPeter Zijlstra * event status with a single "and" operation.  If there are pending events,
67*d7bfc7d5SPeter Zijlstra * then enter the hypervisor to get them handled.
68*d7bfc7d5SPeter Zijlstra */
69*d7bfc7d5SPeter ZijlstraSYM_FUNC_START(xen_irq_enable_direct)
70*d7bfc7d5SPeter Zijlstra	FRAME_BEGIN
71*d7bfc7d5SPeter Zijlstra	/* Unmask events */
72*d7bfc7d5SPeter Zijlstra	movb $0, PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_mask
73*d7bfc7d5SPeter Zijlstra
74*d7bfc7d5SPeter Zijlstra	/*
75*d7bfc7d5SPeter Zijlstra	 * Preempt here doesn't matter because that will deal with any
76*d7bfc7d5SPeter Zijlstra	 * pending interrupts.  The pending check may end up being run
77*d7bfc7d5SPeter Zijlstra	 * on the wrong CPU, but that doesn't hurt.
78*d7bfc7d5SPeter Zijlstra	 */
79*d7bfc7d5SPeter Zijlstra
80*d7bfc7d5SPeter Zijlstra	/* Test for pending */
81*d7bfc7d5SPeter Zijlstra	testb $0xff, PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_pending
82*d7bfc7d5SPeter Zijlstra	jz 1f
83*d7bfc7d5SPeter Zijlstra
	/* Events are pending: enter the hypervisor to get them delivered. */
84*d7bfc7d5SPeter Zijlstra	call check_events
85*d7bfc7d5SPeter Zijlstra1:
86*d7bfc7d5SPeter Zijlstra	FRAME_END
87*d7bfc7d5SPeter Zijlstra	ret
88*d7bfc7d5SPeter ZijlstraSYM_FUNC_END(xen_irq_enable_direct)
89*d7bfc7d5SPeter Zijlstra
9020125c87SPeter Zijlstra/*
9120125c87SPeter Zijlstra * (xen_)save_fl is used to get the current interrupt enable status.
9220125c87SPeter Zijlstra * Callers expect the status to be in X86_EFLAGS_IF, and other bits
9320125c87SPeter Zijlstra * may be set in the return value.  We take advantage of this by
9420125c87SPeter Zijlstra * making sure that X86_EFLAGS_IF has the right value (and other bits
9520125c87SPeter Zijlstra * in that byte are 0), but other bits in the return value are
9620125c87SPeter Zijlstra * undefined.  We need to toggle the state of the bit, because Xen and
9720125c87SPeter Zijlstra * x86 use opposite senses (mask vs enable).
9820125c87SPeter Zijlstra */
9920125c87SPeter ZijlstraSYM_FUNC_START(xen_save_fl_direct)
	/* ZF=1 iff the event mask byte is zero, i.e. events enabled. */
10020125c87SPeter Zijlstra	testb $0xff, PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_mask
	/* %ah = 1 when enabled, 0 when masked (toggles Xen's sense). */
10120125c87SPeter Zijlstra	setz %ah
	/*
	 * Doubling moves that bit to bit 1 of %ah, which is bit 9 of
	 * %rax, i.e. X86_EFLAGS_IF.  Other bits of %rax are undefined,
	 * as the header comment permits.
	 */
10220125c87SPeter Zijlstra	addb %ah, %ah
10320125c87SPeter Zijlstra	ret
10420125c87SPeter ZijlstraSYM_FUNC_END(xen_save_fl_direct)
10520125c87SPeter Zijlstra
1066dcc5627SJiri SlabySYM_FUNC_START(xen_read_cr2)
10755aedddbSPeter Zijlstra	FRAME_BEGIN
	/*
	 * Load this CPU's vcpu_info pointer, then the cr2 value the
	 * hypervisor saved there (Xen PV guests cannot read %cr2
	 * directly).
	 */
10855aedddbSPeter Zijlstra	_ASM_MOV PER_CPU_VAR(xen_vcpu), %_ASM_AX
10955aedddbSPeter Zijlstra	_ASM_MOV XEN_vcpu_info_arch_cr2(%_ASM_AX), %_ASM_AX
11055aedddbSPeter Zijlstra	FRAME_END
11155aedddbSPeter Zijlstra	ret
1126dcc5627SJiri SlabySYM_FUNC_END(xen_read_cr2);
11355aedddbSPeter Zijlstra
1146dcc5627SJiri SlabySYM_FUNC_START(xen_read_cr2_direct)
11555aedddbSPeter Zijlstra	FRAME_BEGIN
	/*
	 * Same as xen_read_cr2, but reads the embedded per-cpu
	 * vcpu_info copy directly, skipping the pointer chase.
	 */
11655aedddbSPeter Zijlstra	_ASM_MOV PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_arch_cr2, %_ASM_AX
11755aedddbSPeter Zijlstra	FRAME_END
11855aedddbSPeter Zijlstra	ret
1196dcc5627SJiri SlabySYM_FUNC_END(xen_read_cr2_direct);
1200a53c9acSPeter Zijlstra.popsection
12156415c4cSJuergen Gross
12256415c4cSJuergen Gross.macro xen_pv_trap name
12356415c4cSJuergen GrossSYM_CODE_START(xen_\name)
124cde07a4eSJosh Poimboeuf	UNWIND_HINT_EMPTY
	/*
	 * Xen places %rcx and %r11 on top of the iret frame (see the
	 * frame layout comments below); discard them so the native
	 * handler sees a standard exception frame.
	 */
12556415c4cSJuergen Gross	pop %rcx
12656415c4cSJuergen Gross	pop %r11
12756415c4cSJuergen Gross	jmp  \name
12856415c4cSJuergen GrossSYM_CODE_END(xen_\name)
12956415c4cSJuergen Gross_ASM_NOKPROBE(xen_\name)
13056415c4cSJuergen Gross.endm
13156415c4cSJuergen Gross
13256415c4cSJuergen Grossxen_pv_trap asm_exc_divide_error
13356415c4cSJuergen Grossxen_pv_trap asm_xenpv_exc_debug
13456415c4cSJuergen Grossxen_pv_trap asm_exc_int3
13556415c4cSJuergen Grossxen_pv_trap asm_xenpv_exc_nmi
13656415c4cSJuergen Grossxen_pv_trap asm_exc_overflow
13756415c4cSJuergen Grossxen_pv_trap asm_exc_bounds
13856415c4cSJuergen Grossxen_pv_trap asm_exc_invalid_op
13956415c4cSJuergen Grossxen_pv_trap asm_exc_device_not_available
1405b4c6d65SJuergen Grossxen_pv_trap asm_xenpv_exc_double_fault
14156415c4cSJuergen Grossxen_pv_trap asm_exc_coproc_segment_overrun
14256415c4cSJuergen Grossxen_pv_trap asm_exc_invalid_tss
14356415c4cSJuergen Grossxen_pv_trap asm_exc_segment_not_present
14456415c4cSJuergen Grossxen_pv_trap asm_exc_stack_segment
14556415c4cSJuergen Grossxen_pv_trap asm_exc_general_protection
14656415c4cSJuergen Grossxen_pv_trap asm_exc_page_fault
14756415c4cSJuergen Grossxen_pv_trap asm_exc_spurious_interrupt_bug
14856415c4cSJuergen Grossxen_pv_trap asm_exc_coprocessor_error
14956415c4cSJuergen Grossxen_pv_trap asm_exc_alignment_check
15056415c4cSJuergen Gross#ifdef CONFIG_X86_MCE
151c3d7fa66SJuergen Grossxen_pv_trap asm_xenpv_exc_machine_check
15256415c4cSJuergen Gross#endif /* CONFIG_X86_MCE */
15356415c4cSJuergen Grossxen_pv_trap asm_exc_simd_coprocessor_error
15456415c4cSJuergen Gross#ifdef CONFIG_IA32_EMULATION
15556415c4cSJuergen Grossxen_pv_trap entry_INT80_compat
15656415c4cSJuergen Gross#endif
1572e924936SJuergen Grossxen_pv_trap asm_exc_xen_unknown_trap
15856415c4cSJuergen Grossxen_pv_trap asm_exc_xen_hypervisor_callback
15956415c4cSJuergen Gross
16056415c4cSJuergen Gross	__INIT
16156415c4cSJuergen GrossSYM_CODE_START(xen_early_idt_handler_array)
	/*
	 * One stub per exception vector: drop the Xen-pushed
	 * %rcx/%r11 pair, then tail-jump to the corresponding native
	 * early IDT handler.  Each stub is padded with 0xcc (int3) up
	 * to XEN_EARLY_IDT_HANDLER_SIZE so the array is indexable.
	 */
16256415c4cSJuergen Gross	i = 0
16356415c4cSJuergen Gross	.rept NUM_EXCEPTION_VECTORS
164cde07a4eSJosh Poimboeuf	UNWIND_HINT_EMPTY
16556415c4cSJuergen Gross	pop %rcx
16656415c4cSJuergen Gross	pop %r11
16756415c4cSJuergen Gross	jmp early_idt_handler_array + i*EARLY_IDT_HANDLER_SIZE
16856415c4cSJuergen Gross	i = i + 1
16956415c4cSJuergen Gross	.fill xen_early_idt_handler_array + i*XEN_EARLY_IDT_HANDLER_SIZE - ., 1, 0xcc
17056415c4cSJuergen Gross	.endr
17156415c4cSJuergen GrossSYM_CODE_END(xen_early_idt_handler_array)
17256415c4cSJuergen Gross	__FINIT
17356415c4cSJuergen Gross
17456415c4cSJuergen Grosshypercall_iret = hypercall_page + __HYPERVISOR_iret * 32
17556415c4cSJuergen Gross/*
17656415c4cSJuergen Gross * Xen64 iret frame:
17756415c4cSJuergen Gross *
17856415c4cSJuergen Gross *	ss
17956415c4cSJuergen Gross *	rsp
18056415c4cSJuergen Gross *	rflags
18156415c4cSJuergen Gross *	cs
18256415c4cSJuergen Gross *	rip		<-- standard iret frame
18356415c4cSJuergen Gross *
18456415c4cSJuergen Gross *	flags
18556415c4cSJuergen Gross *
18656415c4cSJuergen Gross *	rcx		}
18756415c4cSJuergen Gross *	r11		}<-- pushed by hypercall page
18856415c4cSJuergen Gross * rsp->rax		}
18956415c4cSJuergen Gross */
19056415c4cSJuergen GrossSYM_CODE_START(xen_iret)
191cde07a4eSJosh Poimboeuf	UNWIND_HINT_EMPTY
	/*
	 * Push the zero "flags" word the iret hypercall expects below
	 * the standard frame (see the Xen64 iret frame diagram above),
	 * then let the hypervisor perform the return.
	 */
19256415c4cSJuergen Gross	pushq $0
19356415c4cSJuergen Gross	jmp hypercall_iret
19456415c4cSJuergen GrossSYM_CODE_END(xen_iret)
19556415c4cSJuergen Gross
19656415c4cSJuergen Gross/*
19756415c4cSJuergen Gross * Xen handles syscall callbacks much like ordinary exceptions, which
19856415c4cSJuergen Gross * means we have:
19956415c4cSJuergen Gross * - kernel gs
20056415c4cSJuergen Gross * - kernel rsp
20156415c4cSJuergen Gross * - an iret-like stack frame on the stack (including rcx and r11):
20256415c4cSJuergen Gross *	ss
20356415c4cSJuergen Gross *	rsp
20456415c4cSJuergen Gross *	rflags
20556415c4cSJuergen Gross *	cs
20656415c4cSJuergen Gross *	rip
20756415c4cSJuergen Gross *	r11
20856415c4cSJuergen Gross * rsp->rcx
20956415c4cSJuergen Gross */
21056415c4cSJuergen Gross
21156415c4cSJuergen Gross/* Normal 64-bit system call target */
212cde07a4eSJosh PoimboeufSYM_CODE_START(xen_syscall_target)
213cde07a4eSJosh Poimboeuf	UNWIND_HINT_EMPTY
	/* Restore %rcx/%r11 from the Xen-built frame (see layout above). */
21356415c4cSJuergen Gross	popq %rcx
21456415c4cSJuergen Gross	popq %r11
21556415c4cSJuergen Gross
21656415c4cSJuergen Gross	/*
21756415c4cSJuergen Gross	 * Neither Xen nor the kernel really knows what the old SS and
21856415c4cSJuergen Gross	 * CS were.  The kernel expects __USER_DS and __USER_CS, so
21956415c4cSJuergen Gross	 * report those values even though Xen will guess its own values.
22056415c4cSJuergen Gross	 */
22156415c4cSJuergen Gross	movq $__USER_DS, 4*8(%rsp)
22256415c4cSJuergen Gross	movq $__USER_CS, 1*8(%rsp)
22356415c4cSJuergen Gross
	/* Frame now matches native SYSCALL entry after its hw-frame push. */
22456415c4cSJuergen Gross	jmp entry_SYSCALL_64_after_hwframe
225cde07a4eSJosh PoimboeufSYM_CODE_END(xen_syscall_target)
22756415c4cSJuergen Gross
22856415c4cSJuergen Gross#ifdef CONFIG_IA32_EMULATION
22956415c4cSJuergen Gross
23056415c4cSJuergen Gross/* 32-bit compat syscall target */
231cde07a4eSJosh PoimboeufSYM_CODE_START(xen_syscall32_target)
232cde07a4eSJosh Poimboeuf	UNWIND_HINT_EMPTY
	/* Restore %rcx/%r11 from the Xen-built frame (see layout above). */
23356415c4cSJuergen Gross	popq %rcx
23456415c4cSJuergen Gross	popq %r11
23556415c4cSJuergen Gross
23656415c4cSJuergen Gross	/*
23756415c4cSJuergen Gross	 * Neither Xen nor the kernel really knows what the old SS and
23856415c4cSJuergen Gross	 * CS were.  The kernel expects __USER32_DS and __USER32_CS, so
23956415c4cSJuergen Gross	 * report those values even though Xen will guess its own values.
24056415c4cSJuergen Gross	 */
24156415c4cSJuergen Gross	movq $__USER32_DS, 4*8(%rsp)
24256415c4cSJuergen Gross	movq $__USER32_CS, 1*8(%rsp)
24356415c4cSJuergen Gross
	/* Frame now matches native compat SYSCALL entry after its hw-frame push. */
24456415c4cSJuergen Gross	jmp entry_SYSCALL_compat_after_hwframe
245cde07a4eSJosh PoimboeufSYM_CODE_END(xen_syscall32_target)
24656415c4cSJuergen Gross
24756415c4cSJuergen Gross/* 32-bit compat sysenter target */
248cde07a4eSJosh PoimboeufSYM_CODE_START(xen_sysenter_target)
249cde07a4eSJosh Poimboeuf	UNWIND_HINT_EMPTY
25056415c4cSJuergen Gross	/*
25156415c4cSJuergen Gross	 * NB: Xen is polite and clears TF from EFLAGS for us.  This means
25256415c4cSJuergen Gross	 * that we don't need to guard against single step exceptions here.
25356415c4cSJuergen Gross	 */
	/* Restore %rcx/%r11 from the Xen-built frame (see layout above). */
25456415c4cSJuergen Gross	popq %rcx
25556415c4cSJuergen Gross	popq %r11
25656415c4cSJuergen Gross
25756415c4cSJuergen Gross	/*
25856415c4cSJuergen Gross	 * Neither Xen nor the kernel really knows what the old SS and
25956415c4cSJuergen Gross	 * CS were.  The kernel expects __USER32_DS and __USER32_CS, so
26056415c4cSJuergen Gross	 * report those values even though Xen will guess its own values.
26156415c4cSJuergen Gross	 */
26256415c4cSJuergen Gross	movq $__USER32_DS, 4*8(%rsp)
26356415c4cSJuergen Gross	movq $__USER32_CS, 1*8(%rsp)
26456415c4cSJuergen Gross
	/* Frame now matches native compat SYSENTER entry after its hw-frame push. */
26556415c4cSJuergen Gross	jmp entry_SYSENTER_compat_after_hwframe
266cde07a4eSJosh PoimboeufSYM_CODE_END(xen_sysenter_target)
26756415c4cSJuergen Gross
26856415c4cSJuergen Gross#else /* !CONFIG_IA32_EMULATION */
26956415c4cSJuergen Gross
	/*
	 * Without compat support, 32-bit syscall/sysenter entries just
	 * fail the call: discard the Xen-pushed %rcx/%r11, return
	 * -ENOSYS, and go back to the guest via the iret hypercall
	 * (pushq $0 is the "flags" word it expects).
	 */
270cde07a4eSJosh PoimboeufSYM_CODE_START(xen_syscall32_target)
271cde07a4eSJosh PoimboeufSYM_CODE_START(xen_sysenter_target)
272cde07a4eSJosh Poimboeuf	UNWIND_HINT_EMPTY
27356415c4cSJuergen Gross	lea 16(%rsp), %rsp	/* strip %rcx, %r11 */
27456415c4cSJuergen Gross	mov $-ENOSYS, %rax
27556415c4cSJuergen Gross	pushq $0
27656415c4cSJuergen Gross	jmp hypercall_iret
277cde07a4eSJosh PoimboeufSYM_CODE_END(xen_sysenter_target)
278cde07a4eSJosh PoimboeufSYM_CODE_END(xen_syscall32_target)
27956415c4cSJuergen Gross
28056415c4cSJuergen Gross#endif	/* CONFIG_IA32_EMULATION */
281