/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Asm versions of Xen pv-ops, suitable for direct use.
 *
 * We only bother with direct forms (ie, vcpu in percpu data) of the
 * operations here; the indirect forms are better handled in C.
 */

#include <asm/asm-offsets.h>
#include <asm/percpu.h>
#include <asm/processor-flags.h>
#include <asm/frame.h>
#include <asm/asm.h>

#include <linux/linkage.h>

/*
 * Enable events.  This clears the event mask and tests the pending
 * event status with one and operation.  If there are pending events,
 * then enter the hypervisor to get them handled.
 *
 * Clobbers: flags; check_events preserves all caller-saved GPRs itself.
 */
SYM_FUNC_START(xen_irq_enable_direct)
	FRAME_BEGIN
	/*
	 * Unmask events: a single byte store, so it cannot be torn by an
	 * interrupt arriving mid-update.
	 */
	movb $0, PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_mask

	/*
	 * Preempt here doesn't matter because that will deal with any
	 * pending interrupts.  The pending check may end up being run
	 * on the wrong CPU, but that doesn't hurt.
	 */

	/* Test for pending events; any non-zero byte means work to do. */
	testb $0xff, PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_pending
	jz 1f

	/* Pending events exist: trap into the hypervisor to deliver them. */
	call check_events
1:
	FRAME_END
	ret
SYM_FUNC_END(xen_irq_enable_direct)


/*
 * Disabling events is simply a matter of making the event mask
 * non-zero.  No pending check is needed: with the mask set, events
 * stay queued until the mask is cleared again.
 */
SYM_FUNC_START(xen_irq_disable_direct)
	movb $1, PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_mask
	ret
SYM_FUNC_END(xen_irq_disable_direct)

/*
 * (xen_)save_fl is used to get the current interrupt enable status.
 * Callers expect the status to be in X86_EFLAGS_IF, and other bits
 * may be set in the return value.  We take advantage of this by
 * making sure that X86_EFLAGS_IF has the right value (and other bits
 * in that byte are 0), but other bits in the return value are
 * undefined.  We need to toggle the state of the bit, because Xen and
 * x86 use opposite senses (mask vs enable).
 */
SYM_FUNC_START(xen_save_fl_direct)
	testb $0xff, PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_mask
	/*
	 * %ah = 1 if events are unmasked (mask byte == 0), else 0.
	 * Doubling it puts the value in bit 9 of %ax, i.e. X86_EFLAGS_IF
	 * (0x200), which is the only bit callers may rely on.
	 */
	setz %ah
	addb %ah, %ah
	ret
SYM_FUNC_END(xen_save_fl_direct)


/*
 * In principle the caller should be passing us a value return from
 * xen_save_fl_direct, but for robustness sake we test only the
 * X86_EFLAGS_IF flag rather than the whole byte.  After setting the
 * interrupt mask state, it checks for unmasked pending events and
 * enters the hypervisor to get them delivered if so.
 *
 * In:  %rdi (SysV arg 0) = flags word; only X86_EFLAGS_IF is examined.
 */
SYM_FUNC_START(xen_restore_fl_direct)
	FRAME_BEGIN
	testw $X86_EFLAGS_IF, %di
	/* mask = 0 if IF set (enable), 1 if IF clear (disable). */
	setz PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_mask
	/*
	 * Preempt here doesn't matter because that will deal with any
	 * pending interrupts.  The pending check may end up being run
	 * on the wrong CPU, but that doesn't hurt.
	 */

	/*
	 * Check for unmasked and pending in one 16-bit compare: this
	 * reads the pending byte (low) and the adjacent mask byte (high)
	 * together, so 0x0001 means pending == 1 && mask == 0.
	 * NOTE(review): relies on the two bytes being adjacent in
	 * struct vcpu_info — confirm against the asm-offsets definitions.
	 */
	cmpw $0x0001, PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_pending
	jnz 1f
	call check_events
1:
	FRAME_END
	ret
SYM_FUNC_END(xen_restore_fl_direct)


/*
 * Force an event check by making a hypercall, but preserve regs
 * before making the call.  All caller-saved GPRs are pushed/popped so
 * this is transparently callable from the irq-flag helpers above,
 * which themselves promise not to clobber anything.
 */
SYM_FUNC_START(check_events)
	FRAME_BEGIN
	push %rax
	push %rcx
	push %rdx
	push %rsi
	push %rdi
	push %r8
	push %r9
	push %r10
	push %r11
	call xen_force_evtchn_callback
	pop %r11
	pop %r10
	pop %r9
	pop %r8
	pop %rdi
	pop %rsi
	pop %rdx
	pop %rcx
	pop %rax
	FRAME_END
	ret
SYM_FUNC_END(check_events)

/*
 * Read the faulting address out of the Xen-provided vcpu_info instead
 * of %cr2 (not readable under PV).  Indirect form: load the vcpu_info
 * pointer from the percpu xen_vcpu, then fetch arch.cr2 from it.
 * Out: %rax = cr2 value.
 */
SYM_FUNC_START(xen_read_cr2)
	FRAME_BEGIN
	_ASM_MOV PER_CPU_VAR(xen_vcpu), %_ASM_AX
	_ASM_MOV XEN_vcpu_info_arch_cr2(%_ASM_AX), %_ASM_AX
	FRAME_END
	ret
SYM_FUNC_END(xen_read_cr2);

/*
 * Direct form: the vcpu_info lives in percpu data, so arch.cr2 can be
 * read with a single percpu load.  Out: %rax = cr2 value.
 */
SYM_FUNC_START(xen_read_cr2_direct)
	FRAME_BEGIN
	_ASM_MOV PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_arch_cr2, %_ASM_AX
	FRAME_END
	ret
SYM_FUNC_END(xen_read_cr2_direct);