/*
 * Asm versions of Xen pv-ops, suitable for direct use.
 *
 * We only bother with direct forms (i.e., vcpu in percpu data) of the
 * operations here; the indirect forms are better handled in C.
 */

#include <asm/asm-offsets.h>
#include <asm/percpu.h>
#include <asm/processor-flags.h>
#include <asm/frame.h>

#include <linux/linkage.h>

/*
 * Enable events. This clears the event mask and tests the pending
 * event status with a single and operation. If there are pending
 * events, then enter the hypervisor to get them handled.
 */
ENTRY(xen_irq_enable_direct)
        FRAME_BEGIN
        /* Unmask events */
        movb $0, PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_mask

        /*
         * Preempt here doesn't matter because that will deal with any
         * pending interrupts. The pending check may end up being run
         * on the wrong CPU, but that doesn't hurt.
         */

        /* Test for pending */
        testb $0xff, PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_pending
        jz 1f

        call check_events
1:
        FRAME_END
        ret
ENDPROC(xen_irq_enable_direct)


/*
 * Disabling events is simply a matter of making the event mask
 * non-zero.
 */
ENTRY(xen_irq_disable_direct)
        movb $1, PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_mask
        ret
ENDPROC(xen_irq_disable_direct)

/*
 * (xen_)save_fl is used to get the current interrupt enable status.
 * Callers expect the status to be in X86_EFLAGS_IF, and other bits
 * may be set in the return value. We take advantage of this by
 * making sure that X86_EFLAGS_IF has the right value (and other bits
 * in that byte are 0), but other bits in the return value are
 * undefined. We need to toggle the state of the bit, because Xen and
 * x86 use opposite senses (mask vs enable).
 */
ENTRY(xen_save_fl_direct)
        testb $0xff, PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_mask
        setz %ah
        addb %ah, %ah
        ret
ENDPROC(xen_save_fl_direct)


/*
 * In principle the caller should be passing us a value returned from
 * xen_save_fl_direct, but for robustness' sake we test only the
 * X86_EFLAGS_IF flag rather than the whole byte. After setting the
 * interrupt mask state, we check for unmasked pending events and
 * enter the hypervisor to get them delivered if so.
 */
ENTRY(xen_restore_fl_direct)
        FRAME_BEGIN
#ifdef CONFIG_X86_64
        testw $X86_EFLAGS_IF, %di
#else
        testb $X86_EFLAGS_IF>>8, %ah
#endif
        setz PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_mask
        /*
         * Preempt here doesn't matter because that will deal with any
         * pending interrupts. The pending check may end up being run
         * on the wrong CPU, but that doesn't hurt.
         */

        /* check for unmasked and pending */
        cmpw $0x0001, PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_pending
        jnz 1f
        call check_events
1:
        FRAME_END
        ret
ENDPROC(xen_restore_fl_direct)


/*
 * Force an event check by making a hypercall, but preserve regs
 * before making the call.
 */
ENTRY(check_events)
        FRAME_BEGIN
#ifdef CONFIG_X86_32
        push %eax
        push %ecx
        push %edx
        call xen_force_evtchn_callback
        pop %edx
        pop %ecx
        pop %eax
#else
        push %rax
        push %rcx
        push %rdx
        push %rsi
        push %rdi
        push %r8
        push %r9
        push %r10
        push %r11
        call xen_force_evtchn_callback
        pop %r11
        pop %r10
        pop %r9
        pop %r8
        pop %rdi
        pop %rsi
        pop %rdx
        pop %rcx
        pop %rax
#endif
        FRAME_END
        ret
ENDPROC(check_events)
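
/*
 * Illustrative C sketch of what xen_save_fl_direct computes (not part
 * of this file's build; the names below are hypothetical, not kernel
 * symbols). It leans on two facts used above: X86_EFLAGS_IF is bit 9
 * (0x200), and evtchn_upcall_mask has the opposite sense of EFLAGS.IF,
 * so a non-zero mask means events are disabled. The asm reaches the
 * same result with "setz %ah; addb %ah, %ah": setz writes 0 or 1 into
 * %ah (bit 8 of %ax), and the add shifts that value up into bit 9.
 *
 *	#include <stdint.h>
 *
 *	#define SKETCH_EFLAGS_IF 0x200UL	// bit 9 of EFLAGS
 *
 *	// Hypothetical helper mirroring xen_save_fl_direct's result.
 *	static inline unsigned long sketch_save_fl(uint8_t upcall_mask)
 *	{
 *		// mask == 0 means events are enabled, so report IF set;
 *		// all other bits of the return value are undefined (0 here).
 *		return upcall_mask == 0 ? SKETCH_EFLAGS_IF : 0;
 *	}
 */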
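
/*
 * Illustrative C sketch of the "cmpw $0x0001" test in
 * xen_restore_fl_direct (not part of this file's build; the names
 * below are hypothetical, not kernel symbols). In Xen's public
 * vcpu_info the byte-sized evtchn_upcall_pending and
 * evtchn_upcall_mask fields sit next to each other, so on
 * little-endian x86 a 16-bit load at the pending offset yields pending
 * in the low byte and mask in the high byte; the word value 0x0001
 * therefore means "pending set and mask clear", i.e. an unmasked
 * pending event, checked in a single compare. The struct below is a
 * trimmed sketch of that layout, not the authoritative definition.
 *
 *	#include <stdint.h>
 *
 *	struct sketch_vcpu_info {
 *		uint8_t evtchn_upcall_pending;	// low byte of the word compare
 *		uint8_t evtchn_upcall_mask;	// high byte of the word compare
 *		// remaining fields omitted
 *	};
 *
 *	// Hypothetical C equivalent of the word compare in the asm.
 *	static inline int sketch_unmasked_and_pending(const struct sketch_vcpu_info *v)
 *	{
 *		return v->evtchn_upcall_pending != 0 && v->evtchn_upcall_mask == 0;
 *	}
 */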