xref: /openbmc/linux/arch/x86/xen/xen-asm.S (revision 6dcc5627)
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Asm versions of Xen pv-ops, suitable for direct use.
 *
 * We only bother with direct forms (i.e., vcpu in percpu data) of the
 * operations here; the indirect forms are better handled in C.
 */

#include <asm/asm-offsets.h>
#include <asm/percpu.h>
#include <asm/processor-flags.h>
#include <asm/frame.h>
#include <asm/asm.h>

#include <linux/linkage.h>

/*
 * Enable events.  This clears the event mask and tests the pending
 * event status with a single and operation.  If there are pending
 * events, then enter the hypervisor to get them handled.
 */
SYM_FUNC_START(xen_irq_enable_direct)
	FRAME_BEGIN
	/* Unmask events */
	movb $0, PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_mask

	/*
	 * Getting preempted here doesn't matter, because preemption will
	 * itself deal with any pending interrupts.  The pending check may
	 * then end up being run on the wrong CPU, but that doesn't hurt.
	 */

	/* Test for pending */
	testb $0xff, PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_pending
	jz 1f

	call check_events
1:
	FRAME_END
	ret
SYM_FUNC_END(xen_irq_enable_direct)
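
/*
 * For reference, a rough C equivalent of the enable path above (a sketch
 * only, using the Xen ABI field names that XEN_vcpu_info_mask and
 * XEN_vcpu_info_pending refer to; it is not the in-tree "indirect"
 * implementation mentioned at the top of this file):
 *
 *	vcpu_info->evtchn_upcall_mask = 0;
 *	barrier();
 *	if (vcpu_info->evtchn_upcall_pending)
 *		xen_force_evtchn_callback();
 */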


/*
 * Disabling events is simply a matter of making the event mask
 * non-zero.
 */
SYM_FUNC_START(xen_irq_disable_direct)
	movb $1, PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_mask
	ret
SYM_FUNC_END(xen_irq_disable_direct)

/*
 * (xen_)save_fl is used to get the current interrupt enable status.
 * Callers expect the status to be reported in the X86_EFLAGS_IF bit;
 * other bits in the return value may be set arbitrarily.  We take
 * advantage of this by making sure that X86_EFLAGS_IF has the right
 * value (and other bits in that byte are 0), while leaving the rest of
 * the return value undefined.  We need to invert the bit, because Xen
 * and x86 use opposite senses (mask vs enable).
 */
SYM_FUNC_START(xen_save_fl_direct)
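	/*
	 * How the return value is formed: X86_EFLAGS_IF is bit 9 (0x200)
	 * and %ah is bits 8..15 of the returned %eax/%rax.  testb sets ZF
	 * when the event mask byte is 0 (events enabled), setz turns that
	 * into %ah = 1, and addb doubles it to %ah = 2, i.e. bit 9 of the
	 * return value.  Bits outside that byte are left untouched and are
	 * therefore undefined, as noted above.
	 */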
	testb $0xff, PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_mask
	setz %ah
	addb %ah, %ah
	ret
SYM_FUNC_END(xen_save_fl_direct)


/*
 * In principle the caller should be passing us a value returned from
 * xen_save_fl_direct, but for robustness' sake we test only the
 * X86_EFLAGS_IF flag rather than the whole byte.  After setting the
 * interrupt mask state, we check for unmasked pending events and, if
 * there are any, enter the hypervisor to get them delivered.
 */
SYM_FUNC_START(xen_restore_fl_direct)
	FRAME_BEGIN
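	/*
	 * The flags argument arrives in %rdi (first C argument) on 64-bit
	 * and, as the %ah test below implies, in %eax on 32-bit.  Only
	 * X86_EFLAGS_IF (bit 9, 0x200) is examined: directly via %di on
	 * 64-bit, or as bit 1 of %ah (bits 8..15 of %eax) on 32-bit.
	 */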
#ifdef CONFIG_X86_64
	testw $X86_EFLAGS_IF, %di
#else
	testb $X86_EFLAGS_IF>>8, %ah
#endif
	setz PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_mask
	/*
	 * Getting preempted here doesn't matter, because preemption will
	 * itself deal with any pending interrupts.  The pending check may
	 * then end up being run on the wrong CPU, but that doesn't hurt.
	 */

	/* check for unmasked and pending */
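	/*
	 * Note on the word-sized compare below: evtchn_upcall_pending and
	 * evtchn_upcall_mask are adjacent bytes in vcpu_info, so on
	 * little-endian x86 the 16-bit value at the pending offset is
	 * pending | (mask << 8).  Comparing it with 0x0001 therefore checks
	 * "pending set and mask clear" in a single instruction.
	 */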
	cmpw $0x0001, PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_pending
	jnz 1f
	call check_events
1:
	FRAME_END
	ret
SYM_FUNC_END(xen_restore_fl_direct)


/*
 * Force an event check by making a hypercall, but preserve regs
 * before making the call.
 */
SYM_FUNC_START(check_events)
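	/*
	 * check_events is called from the *_direct stubs above, whose
	 * callers do not expect the register clobbers of a normal C call;
	 * presumably for that reason, every register the C calling
	 * convention would allow xen_force_evtchn_callback to clobber is
	 * saved and restored around the call.
	 */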
	FRAME_BEGIN
#ifdef CONFIG_X86_32
	push %eax
	push %ecx
	push %edx
	call xen_force_evtchn_callback
	pop %edx
	pop %ecx
	pop %eax
#else
	push %rax
	push %rcx
	push %rdx
	push %rsi
	push %rdi
	push %r8
	push %r9
	push %r10
	push %r11
	call xen_force_evtchn_callback
	pop %r11
	pop %r10
	pop %r9
	pop %r8
	pop %rdi
	pop %rsi
	pop %rdx
	pop %rcx
	pop %rax
#endif
	FRAME_END
	ret
SYM_FUNC_END(check_events)
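
/*
 * The two helpers below return the faulting address of the most recent
 * page fault.  For a Xen PV guest that address is supplied by the
 * hypervisor in vcpu_info->arch.cr2 (the field XEN_vcpu_info_arch_cr2
 * refers to) rather than read from %cr2 itself.  xen_read_cr2 locates
 * vcpu_info through the per-CPU xen_vcpu pointer, while
 * xen_read_cr2_direct assumes the "direct" per-CPU copy (xen_vcpu_info)
 * is in use, as described at the top of this file.  Roughly, in C (a
 * sketch, not the in-tree implementation):
 *
 *	unsigned long xen_read_cr2(void)
 *	{
 *		return this_cpu_read(xen_vcpu)->arch.cr2;
 *	}
 */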

SYM_FUNC_START(xen_read_cr2)
	FRAME_BEGIN
	_ASM_MOV PER_CPU_VAR(xen_vcpu), %_ASM_AX
	_ASM_MOV XEN_vcpu_info_arch_cr2(%_ASM_AX), %_ASM_AX
	FRAME_END
	ret
SYM_FUNC_END(xen_read_cr2);

SYM_FUNC_START(xen_read_cr2_direct)
	FRAME_BEGIN
	_ASM_MOV PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_arch_cr2, %_ASM_AX
	FRAME_END
	ret
SYM_FUNC_END(xen_read_cr2_direct);