/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Asm versions of Xen pv-ops, suitable for direct use.
 *
 * We only bother with the direct forms (i.e., vcpu info in percpu data)
 * of the operations here; the indirect forms are better handled in C.
 */

#include <asm/asm-offsets.h>
#include <asm/percpu.h>
#include <asm/processor-flags.h>
#include <asm/frame.h>

#include <linux/linkage.h>

/*
 * Enable events.  This clears the event mask and then tests the pending
 * event status with a single and (testb) operation.  If there are
 * pending events, enter the hypervisor to get them handled.
 */
ENTRY(xen_irq_enable_direct)
	FRAME_BEGIN
	/* Unmask events */
	movb $0, PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_mask

	/*
	 * Being preempted here doesn't matter, because preemption will
	 * deal with any pending interrupts.  The pending check may end
	 * up being run on the wrong CPU, but that doesn't hurt.
	 */

	/* Test for pending */
	testb $0xff, PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_pending
	jz 1f

	call check_events
1:
	FRAME_END
	ret
	ENDPROC(xen_irq_enable_direct)


/*
 * Disabling events is simply a matter of making the event mask
 * non-zero.
 */
ENTRY(xen_irq_disable_direct)
	movb $1, PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_mask
	ret
ENDPROC(xen_irq_disable_direct)

/*
 * (xen_)save_fl is used to get the current interrupt enable status.
 * Callers expect the status to be in X86_EFLAGS_IF, and other bits
 * may be set in the return value.  We take advantage of this by
 * making sure that X86_EFLAGS_IF has the right value (and other bits
 * in that byte are 0), but other bits in the return value are
 * undefined.  We need to toggle the state of the bit, because Xen and
 * x86 use opposite senses (mask vs enable).
 */
ENTRY(xen_save_fl_direct)
	testb $0xff, PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_mask
	setz %ah			/* %ah = 1 if events are unmasked */
	addb %ah, %ah			/* 1 -> 2, i.e. X86_EFLAGS_IF in %ax */
	ret
	ENDPROC(xen_save_fl_direct)


/*
 * In principle the caller should be passing us a value returned from
 * xen_save_fl_direct, but for robustness' sake we test only the
 * X86_EFLAGS_IF flag rather than the whole byte.  After setting the
 * interrupt mask state, we check for unmasked pending events and
 * enter the hypervisor to get them delivered if there are any.
 */
ENTRY(xen_restore_fl_direct)
	FRAME_BEGIN
#ifdef CONFIG_X86_64
	testw $X86_EFLAGS_IF, %di	/* flags argument arrives in %rdi */
#else
	testb $X86_EFLAGS_IF>>8, %ah	/* flags argument arrives in %eax */
#endif
	setz PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_mask	/* IF clear -> mask = 1 */
	/*
	 * Being preempted here doesn't matter, because preemption will
	 * deal with any pending interrupts.  The pending check may end
	 * up being run on the wrong CPU, but that doesn't hurt.
	 */

	/*
	 * Check for unmasked and pending: the mask byte immediately
	 * follows the pending byte, so comparing the word against
	 * 0x0001 tests "pending set and mask clear" in one go.
	 */
	cmpw $0x0001, PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_pending
	jnz 1f
	call check_events
1:
	FRAME_END
	ret
	ENDPROC(xen_restore_fl_direct)


/*
 * Force an event check by making a hypercall, but preserve regs
 * before making the call: the C callee clobbers the usual
 * call-clobbered registers, while our callers expect them to be
 * left untouched.
 */
ENTRY(check_events)
	FRAME_BEGIN
#ifdef CONFIG_X86_32
	push %eax
	push %ecx
	push %edx
	call xen_force_evtchn_callback
	pop %edx
	pop %ecx
	pop %eax
#else
	push %rax
	push %rcx
	push %rdx
	push %rsi
	push %rdi
	push %r8
	push %r9
	push %r10
	push %r11
	call xen_force_evtchn_callback
	pop %r11
	pop %r10
	pop %r9
	pop %r8
	pop %rdi
	pop %rsi
	pop %rdx
	pop %rcx
	pop %rax
#endif
	FRAME_END
	ret
ENDPROC(check_events)
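
/*
 * Usage sketch (illustrative only, never assembled): roughly how a
 * local_irq_save()/local_irq_restore() style sequence could invoke the
 * direct ops above, shown with the 64-bit register convention (result
 * in %rax, argument in %rdi; 32-bit passes and returns the flags in
 * %eax instead).  Real call sites are emitted by the paravirt-ops
 * patching machinery rather than written by hand, so treat this purely
 * as documentation of the calling convention, not as code to copy.
 */
#if 0
	call xen_save_fl_direct		/* IF-only flags value -> %rax */
	push %rax			/* remember the previous state */
	call xen_irq_disable_direct	/* mask event delivery */

	/* ... critical section ... */

	pop %rdi			/* old flags become the argument */
	call xen_restore_fl_direct	/* restore mask state; flush pending events */
#endif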