/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Asm versions of Xen pv-ops, suitable for direct use.
 *
 * We only bother with direct forms (i.e., vcpu in percpu data) of the
 * operations here; the indirect forms are better handled in C.
 */

#include <asm/errno.h>
#include <asm/asm-offsets.h>
#include <asm/percpu.h>
#include <asm/processor-flags.h>
#include <asm/segment.h>
#include <asm/thread_info.h>
#include <asm/asm.h>
#include <asm/frame.h>

#include <xen/interface/xen.h>

#include <linux/init.h>
#include <linux/linkage.h>

/*
 * Enable events.  This clears the event mask and tests the pending
 * event status with one 'and' operation.  If there are pending events,
 * then enter the hypervisor to get them handled.
 */
SYM_FUNC_START(xen_irq_enable_direct)
	FRAME_BEGIN
	/* Unmask events */
	movb $0, PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_mask

	/*
	 * Preemption here doesn't matter, because it will deal with
	 * any pending interrupts.  The pending check may end up being
	 * run on the wrong CPU, but that doesn't hurt.
	 */

	/* Test for pending */
	testb $0xff, PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_pending
	jz 1f

	call check_events
1:
	FRAME_END
	ret
SYM_FUNC_END(xen_irq_enable_direct)


/*
 * Disabling events is simply a matter of making the event mask
 * non-zero.
 */
SYM_FUNC_START(xen_irq_disable_direct)
	movb $1, PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_mask
	ret
SYM_FUNC_END(xen_irq_disable_direct)

/*
 * (xen_)save_fl is used to get the current interrupt enable status.
 * Callers expect the status to be in X86_EFLAGS_IF, and other bits
 * may be set in the return value.  We take advantage of this by
 * making sure that X86_EFLAGS_IF has the right value (and other bits
 * in that byte are 0), but other bits in the return value are
 * undefined.  We need to toggle the state of the bit, because Xen and
 * x86 use opposite senses (mask vs enable).
 */
SYM_FUNC_START(xen_save_fl_direct)
	testb $0xff, PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_mask
	setz %ah		/* %ah = 1 iff events are unmasked */
	addb %ah, %ah		/* 1 -> 2: bit 9 of %eax, i.e. X86_EFLAGS_IF */
	ret
SYM_FUNC_END(xen_save_fl_direct)


/*
 * In principle the caller should be passing us a value returned from
 * xen_save_fl_direct, but for robustness' sake we test only the
 * X86_EFLAGS_IF flag rather than the whole byte.  After setting the
 * interrupt mask state, we check for unmasked pending events and
 * enter the hypervisor to get them delivered if so.
 */
SYM_FUNC_START(xen_restore_fl_direct)
	FRAME_BEGIN
	testw $X86_EFLAGS_IF, %di
	setz PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_mask
	/*
	 * Preemption here doesn't matter, because it will deal with
	 * any pending interrupts.  The pending check may end up being
	 * run on the wrong CPU, but that doesn't hurt.
	 */

	/* check for unmasked and pending */
	cmpw $0x0001, PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_pending
	jnz 1f
	call check_events
1:
	FRAME_END
	ret
SYM_FUNC_END(xen_restore_fl_direct)


/*
 * Force an event check by making a hypercall, but preserve regs
 * before making the call.
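 *
 * The registers saved and restored here are exactly the caller-saved
 * set of the SysV AMD64 ABI, i.e. everything the C function
 * xen_force_evtchn_callback is allowed to clobber.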
 */
SYM_FUNC_START(check_events)
	FRAME_BEGIN
	push %rax
	push %rcx
	push %rdx
	push %rsi
	push %rdi
	push %r8
	push %r9
	push %r10
	push %r11
	call xen_force_evtchn_callback
	pop %r11
	pop %r10
	pop %r9
	pop %r8
	pop %rdi
	pop %rsi
	pop %rdx
	pop %rcx
	pop %rax
	FRAME_END
	ret
SYM_FUNC_END(check_events)

/*
 * For PV guests Xen keeps the faulting address in the vcpu_info
 * structure rather than in the real %cr2.
 */
SYM_FUNC_START(xen_read_cr2)
	FRAME_BEGIN
	_ASM_MOV PER_CPU_VAR(xen_vcpu), %_ASM_AX
	_ASM_MOV XEN_vcpu_info_arch_cr2(%_ASM_AX), %_ASM_AX
	FRAME_END
	ret
SYM_FUNC_END(xen_read_cr2)

SYM_FUNC_START(xen_read_cr2_direct)
	FRAME_BEGIN
	_ASM_MOV PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_arch_cr2, %_ASM_AX
	FRAME_END
	ret
SYM_FUNC_END(xen_read_cr2_direct)

/*
 * Xen delivers exceptions with %rcx and %r11 pushed on top of the
 * usual iret frame, so strip them off before jumping to the native
 * handler, which expects a standard frame.
 */
.macro xen_pv_trap name
SYM_CODE_START(xen_\name)
	pop %rcx
	pop %r11
	jmp \name
SYM_CODE_END(xen_\name)
_ASM_NOKPROBE(xen_\name)
.endm

xen_pv_trap asm_exc_divide_error
xen_pv_trap asm_xenpv_exc_debug
xen_pv_trap asm_exc_int3
xen_pv_trap asm_xenpv_exc_nmi
xen_pv_trap asm_exc_overflow
xen_pv_trap asm_exc_bounds
xen_pv_trap asm_exc_invalid_op
xen_pv_trap asm_exc_device_not_available
xen_pv_trap asm_exc_double_fault
xen_pv_trap asm_exc_coproc_segment_overrun
xen_pv_trap asm_exc_invalid_tss
xen_pv_trap asm_exc_segment_not_present
xen_pv_trap asm_exc_stack_segment
xen_pv_trap asm_exc_general_protection
xen_pv_trap asm_exc_page_fault
xen_pv_trap asm_exc_spurious_interrupt_bug
xen_pv_trap asm_exc_coprocessor_error
xen_pv_trap asm_exc_alignment_check
#ifdef CONFIG_X86_MCE
xen_pv_trap asm_exc_machine_check
#endif /* CONFIG_X86_MCE */
xen_pv_trap asm_exc_simd_coprocessor_error
#ifdef CONFIG_IA32_EMULATION
xen_pv_trap entry_INT80_compat
#endif
xen_pv_trap asm_exc_xen_unknown_trap
xen_pv_trap asm_exc_xen_hypervisor_callback

	__INIT
SYM_CODE_START(xen_early_idt_handler_array)
	i = 0
	.rept NUM_EXCEPTION_VECTORS
	pop %rcx
	pop %r11
	jmp early_idt_handler_array + i*EARLY_IDT_HANDLER_SIZE
	i = i + 1
	.fill xen_early_idt_handler_array + i*XEN_EARLY_IDT_HANDLER_SIZE - ., 1, 0xcc
	.endr
SYM_CODE_END(xen_early_idt_handler_array)
	__FINIT

hypercall_iret = hypercall_page + __HYPERVISOR_iret * 32
/*
 * Xen64 iret frame:
 *
 *	ss
 *	rsp
 *	rflags
 *	cs
 *	rip		<-- standard iret frame
 *
 *	flags
 *
 *	rcx		}
 *	r11		}<-- pushed by hypercall page
 * rsp->rax		}
 */
SYM_CODE_START(xen_iret)
	pushq $0
	jmp hypercall_iret
SYM_CODE_END(xen_iret)

SYM_CODE_START(xen_sysret64)
	/*
	 * We're already on the usermode stack at this point, but
	 * still with the kernel gs, so we can easily switch back.
	 *
	 * tss.sp2 is scratch space.
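	 *
	 * Stash the user %rsp there, switch to the kernel stack, and
	 * rebuild an iret frame from the SYSRET register convention
	 * (user rip in %rcx, user rflags in %r11) before handing it
	 * to hypercall_iret.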
	 */
	movq %rsp, PER_CPU_VAR(cpu_tss_rw + TSS_sp2)
	movq PER_CPU_VAR(cpu_current_top_of_stack), %rsp

	pushq $__USER_DS
	pushq PER_CPU_VAR(cpu_tss_rw + TSS_sp2)
	pushq %r11
	pushq $__USER_CS
	pushq %rcx

	pushq $VGCF_in_syscall
	jmp hypercall_iret
SYM_CODE_END(xen_sysret64)

/*
 * Xen handles syscall callbacks much like ordinary exceptions, which
 * means we have:
 * - kernel gs
 * - kernel rsp
 * - an iret-like stack frame on the stack (including rcx and r11):
 *	ss
 *	rsp
 *	rflags
 *	cs
 *	rip
 *	r11
 * rsp->rcx
 */

/* Normal 64-bit system call target */
SYM_FUNC_START(xen_syscall_target)
	popq %rcx
	popq %r11

	/*
	 * Neither Xen nor the kernel really knows what the old SS and
	 * CS were.  The kernel expects __USER_DS and __USER_CS, so
	 * report those values even though Xen will guess its own values.
	 *
	 * After the two pops the frame is: rip, cs, rflags, rsp, ss.
	 */
	movq $__USER_DS, 4*8(%rsp)	/* ss */
	movq $__USER_CS, 1*8(%rsp)	/* cs */

	jmp entry_SYSCALL_64_after_hwframe
SYM_FUNC_END(xen_syscall_target)

#ifdef CONFIG_IA32_EMULATION

/* 32-bit compat syscall target */
SYM_FUNC_START(xen_syscall32_target)
	popq %rcx
	popq %r11

	/*
	 * Neither Xen nor the kernel really knows what the old SS and
	 * CS were.  The kernel expects __USER32_DS and __USER32_CS, so
	 * report those values even though Xen will guess its own values.
	 */
	movq $__USER32_DS, 4*8(%rsp)	/* ss */
	movq $__USER32_CS, 1*8(%rsp)	/* cs */

	jmp entry_SYSCALL_compat_after_hwframe
SYM_FUNC_END(xen_syscall32_target)

/* 32-bit compat sysenter target */
SYM_FUNC_START(xen_sysenter_target)
	/*
	 * NB: Xen is polite and clears TF from EFLAGS for us.  This
	 * means that we don't need to guard against single step
	 * exceptions here.
	 */
	popq %rcx
	popq %r11

	/*
	 * Neither Xen nor the kernel really knows what the old SS and
	 * CS were.  The kernel expects __USER32_DS and __USER32_CS, so
	 * report those values even though Xen will guess its own values.
	 */
	movq $__USER32_DS, 4*8(%rsp)	/* ss */
	movq $__USER32_CS, 1*8(%rsp)	/* cs */

	jmp entry_SYSENTER_compat_after_hwframe
SYM_FUNC_END(xen_sysenter_target)

#else /* !CONFIG_IA32_EMULATION */

SYM_FUNC_START_ALIAS(xen_syscall32_target)
SYM_FUNC_START(xen_sysenter_target)
	lea 16(%rsp), %rsp	/* strip %rcx, %r11 */
	mov $-ENOSYS, %rax
	pushq $0
	jmp hypercall_iret
SYM_FUNC_END(xen_sysenter_target)
SYM_FUNC_END_ALIAS(xen_syscall32_target)

#endif /* CONFIG_IA32_EMULATION */