xref: /openbmc/linux/arch/x86/xen/xen-asm.S (revision 8be0eb7e)
/*
 * Asm versions of Xen pv-ops, suitable for either direct use or
 * inlining.  The inline versions are the same as the direct-use
 * versions, with the pre- and post-amble chopped off.
 *
 * This code is encoded for size rather than absolute efficiency, with
 * a view to being able to inline as much as possible.
 *
 * We only bother with direct forms (ie, vcpu in percpu data) of the
 * operations here; the indirect forms are better handled in C, since
 * they're generally too large to inline anyway.
 */
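
/*
 * For reference, "direct" here means this CPU's struct vcpu_info
 * lives in percpu data, so its fields can be reached with a single
 * segment-relative instruction.  A rough C sketch (the percpu
 * variable and field names match the kernel's Xen code; the accessor
 * is illustrative only):
 *
 *	DEFINE_PER_CPU(struct vcpu_info, xen_vcpu_info);
 *
 *	this_cpu_ptr(&xen_vcpu_info)->evtchn_upcall_mask = 0;
 *
 * XEN_vcpu_info_mask and XEN_vcpu_info_pending below are the
 * asm-offsets of those vcpu_info fields.
 */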

#include <asm/asm-offsets.h>
#include <asm/percpu.h>
#include <asm/processor-flags.h>
#include <asm/frame.h>

#include "xen-asm.h"

/*
 * Enable events.  This clears the event mask and tests the pending
 * event status with a single and operation.  If there are pending
 * events, then enter the hypervisor to get them handled.
 */
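/*
 * Equivalent C, as a sketch (field names from the Xen public
 * interface; "vcpu" stands for this CPU's xen_vcpu_info):
 *
 *	vcpu->evtchn_upcall_mask = 0;
 *	if (vcpu->evtchn_upcall_pending)
 *		check_events();
 */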
ENTRY(xen_irq_enable_direct)
	FRAME_BEGIN
	/* Unmask events */
	movb $0, PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_mask

	/*
	 * Preempt here doesn't matter because that will deal with any
	 * pending interrupts.  The pending check may end up being run
	 * on the wrong CPU, but that doesn't hurt.
	 */

	/* Test for pending */
	testb $0xff, PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_pending
	jz 1f

2:	call check_events
1:
ENDPATCH(xen_irq_enable_direct)
	FRAME_END
	ret
	ENDPROC(xen_irq_enable_direct)
	RELOC(xen_irq_enable_direct, 2b+1)
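
/*
 * A note on the annotations above: ENDPATCH marks the end of the
 * region that the paravirt patcher may copy inline, and RELOC(x, 2b+1)
 * records the offset of the call instruction's rel32 operand (one
 * byte past the call opcode at label 2) so the call target can be
 * fixed up when the sequence is relocated.  A RELOC of 0, as used
 * below, means there is no call to fix up.
 */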


/*
 * Disabling events is simply a matter of making the event mask
 * non-zero.
 */
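/* In C terms (sketch): vcpu->evtchn_upcall_mask = 1; */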
ENTRY(xen_irq_disable_direct)
	movb $1, PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_mask
ENDPATCH(xen_irq_disable_direct)
	ret
	ENDPROC(xen_irq_disable_direct)
	RELOC(xen_irq_disable_direct, 0)

/*
 * (xen_)save_fl is used to get the current interrupt enable status.
 * Callers expect the status to be in X86_EFLAGS_IF, and other bits
 * may be set in the return value.  We take advantage of this by
 * making sure that X86_EFLAGS_IF has the right value (and other bits
 * in that byte are 0), but other bits in the return value are
 * undefined.  We need to toggle the state of the bit, because Xen and
 * x86 use opposite senses (mask vs enable).
 */
ENTRY(xen_save_fl_direct)
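	/*
	 * X86_EFLAGS_IF is 0x200, i.e. bit 1 of the second byte of the
	 * return value.  testb sets ZF iff the mask byte is zero (events
	 * enabled); setz turns that into 0 or 1 in %ah, and addb %ah, %ah
	 * doubles it to 0 or 2, which is exactly X86_EFLAGS_IF.
	 */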
	testb $0xff, PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_mask
	setz %ah
	addb %ah, %ah
ENDPATCH(xen_save_fl_direct)
	ret
	ENDPROC(xen_save_fl_direct)
	RELOC(xen_save_fl_direct, 0)


/*
 * In principle the caller should be passing us a value returned from
 * xen_save_fl_direct, but for robustness' sake we test only the
 * X86_EFLAGS_IF flag rather than the whole byte. After setting the
 * interrupt mask state, it checks for unmasked pending events and
 * enters the hypervisor to get them delivered if so.
 */
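/*
 * Equivalent C, as a sketch (flags arrive in %rdi on 64-bit and %eax
 * on 32-bit, per the paravirt calling convention):
 *
 *	vcpu->evtchn_upcall_mask = !(flags & X86_EFLAGS_IF);
 *	if (!vcpu->evtchn_upcall_mask && vcpu->evtchn_upcall_pending)
 *		check_events();
 */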
ENTRY(xen_restore_fl_direct)
	FRAME_BEGIN
#ifdef CONFIG_X86_64
	testw $X86_EFLAGS_IF, %di
#else
	testb $X86_EFLAGS_IF>>8, %ah
#endif
	setz PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_mask
	/*
	 * Preempt here doesn't matter because that will deal with any
	 * pending interrupts.  The pending check may end up being run
	 * on the wrong CPU, but that doesn't hurt.
	 */

	/*
	 * Check for unmasked and pending.  evtchn_upcall_pending and
	 * evtchn_upcall_mask are adjacent bytes in struct vcpu_info, so
	 * this single word-sized compare tests pending == 1 and mask == 0
	 * at the same time.
	 */
	cmpw $0x0001, PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_pending
	jnz 1f
2:	call check_events
1:
ENDPATCH(xen_restore_fl_direct)
	FRAME_END
	ret
	ENDPROC(xen_restore_fl_direct)
	RELOC(xen_restore_fl_direct, 2b+1)


/*
 * Force an event check by making a hypercall, but preserve regs
 * before making the call.  check_events is reached from the direct
 * ops above, which must not clobber anything beyond their declared
 * registers, so every register the C call below is allowed to
 * clobber is saved and restored around it.
 */
ENTRY(check_events)
	FRAME_BEGIN
#ifdef CONFIG_X86_32
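	/* 32-bit: %eax, %ecx and %edx are the C-clobbered registers */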
	push %eax
	push %ecx
	push %edx
	call xen_force_evtchn_callback
	pop %edx
	pop %ecx
	pop %eax
#else
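	/* 64-bit: save the full C-clobbered set, %rax/%rcx/%rdx/%rsi/%rdi/%r8-%r11 */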
	push %rax
	push %rcx
	push %rdx
	push %rsi
	push %rdi
	push %r8
	push %r9
	push %r10
	push %r11
	call xen_force_evtchn_callback
	pop %r11
	pop %r10
	pop %r9
	pop %r8
	pop %rdi
	pop %rsi
	pop %rdx
	pop %rcx
	pop %rax
#endif
	FRAME_END
	ret
ENDPROC(check_events)