/*
 * Asm versions of Xen pv-ops, suitable for either direct use or
 * inlining. The inline versions are the same as the direct-use
 * versions, with the pre- and post-amble chopped off.
 *
 * This code is encoded for size rather than absolute efficiency, with
 * a view to being able to inline as much as possible.
 *
 * We only bother with direct forms (i.e., vcpu in percpu data) of the
 * operations here; the indirect forms are better handled in C, since
 * they're generally too large to inline anyway.
 */

#include <asm/asm-offsets.h>
#include <asm/percpu.h>
#include <asm/processor-flags.h>
#include <asm/frame.h>

#include "xen-asm.h"

/*
 * Enable events. This clears the event mask and tests the pending
 * event status with a single 'and' operation. If there are pending
 * events, then enter the hypervisor to get them handled.
 */
ENTRY(xen_irq_enable_direct)
	FRAME_BEGIN
	/* Unmask events */
	movb $0, PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_mask

	/*
	 * Preemption here doesn't matter, because that will deal with
	 * any pending interrupts. The pending check may end up being
	 * run on the wrong CPU, but that doesn't hurt.
	 */

	/* Test for pending */
	testb $0xff, PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_pending
	jz 1f

2:	call check_events
1:
ENDPATCH(xen_irq_enable_direct)
	FRAME_END
	ret
	ENDPROC(xen_irq_enable_direct)
	RELOC(xen_irq_enable_direct, 2b+1)


/*
 * Disabling events is simply a matter of making the event mask
 * non-zero.
 */
ENTRY(xen_irq_disable_direct)
	movb $1, PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_mask
ENDPATCH(xen_irq_disable_direct)
	ret
	ENDPROC(xen_irq_disable_direct)
	RELOC(xen_irq_disable_direct, 0)

/*
 * (xen_)save_fl is used to get the current interrupt enable status.
 * Callers expect the status to be in X86_EFLAGS_IF, and other bits
 * may be set in the return value. We take advantage of this by
 * making sure that X86_EFLAGS_IF has the right value (and other bits
 * in that byte are 0), but other bits in the return value are
 * undefined. We need to toggle the state of the bit, because Xen and
 * x86 use opposite senses (mask vs enable).
 */
ENTRY(xen_save_fl_direct)
	testb $0xff, PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_mask
	setz %ah		/* %ah = 1 iff events are unmasked */
	addb %ah, %ah		/* 1 -> 2: bit 9 of %eax, i.e. X86_EFLAGS_IF */
ENDPATCH(xen_save_fl_direct)
	ret
	ENDPROC(xen_save_fl_direct)
	RELOC(xen_save_fl_direct, 0)


/*
 * In principle the caller should be passing us a value returned from
 * xen_save_fl_direct, but for robustness' sake we test only the
 * X86_EFLAGS_IF flag rather than the whole byte. After setting the
 * interrupt mask state, we check for unmasked, pending events and
 * enter the hypervisor to get them delivered if so.
 */
ENTRY(xen_restore_fl_direct)
	FRAME_BEGIN
#ifdef CONFIG_X86_64
	testw $X86_EFLAGS_IF, %di
#else
	testb $X86_EFLAGS_IF>>8, %ah
#endif
	setz PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_mask
	/*
	 * Preemption here doesn't matter, because that will deal with
	 * any pending interrupts. The pending check may end up being
	 * run on the wrong CPU, but that doesn't hurt.
	 */

	/*
	 * Check for unmasked and pending: the 16-bit load covers both
	 * the pending byte (low) and the mask byte (high), so 0x0001
	 * means "pending set, mask clear".
	 */
	cmpw $0x0001, PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_pending
	jnz 1f
2:	call check_events
1:
ENDPATCH(xen_restore_fl_direct)
	FRAME_END
	ret
	ENDPROC(xen_restore_fl_direct)
	RELOC(xen_restore_fl_direct, 2b+1)


/*
 * Force an event check by making a hypercall, but preserve regs
 * before making the call.
 */
ENTRY(check_events)
	FRAME_BEGIN
#ifdef CONFIG_X86_32
	push %eax
	push %ecx
	push %edx
	call xen_force_evtchn_callback
	pop %edx
	pop %ecx
	pop %eax
#else
	push %rax
	push %rcx
	push %rdx
	push %rsi
	push %rdi
	push %r8
	push %r9
	push %r10
	push %r11
	call xen_force_evtchn_callback
	pop %r11
	pop %r10
	pop %r9
	pop %r8
	pop %rdi
	pop %rsi
	pop %rdx
	pop %rcx
	pop %rax
#endif
	FRAME_END
	ret
ENDPROC(check_events)
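
/*
 * For reference, a minimal C sketch of the logic the direct paths
 * above implement. This is not kernel code: the _sketch names are
 * hypothetical, and the only layout assumption is the Xen vcpu_info
 * ABI (evtchn_upcall_pending at offset 0, evtchn_upcall_mask at
 * offset 1), which is also what makes the 16-bit cmpw trick in
 * xen_restore_fl_direct work.
 *
 *	unsigned long save_fl_sketch(struct vcpu_info *v)
 *	{
 *		// Xen's mask and x86's IF have opposite senses.
 *		return v->evtchn_upcall_mask ? 0 : X86_EFLAGS_IF;
 *	}
 *
 *	void restore_fl_sketch(struct vcpu_info *v, unsigned long flags)
 *	{
 *		v->evtchn_upcall_mask = !(flags & X86_EFLAGS_IF);
 *		// Check pending only after the mask write, matching
 *		// the instruction ordering of the asm above.
 *		if (!v->evtchn_upcall_mask && v->evtchn_upcall_pending)
 *			check_events();		// hypercall into Xen
 *	}
 */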