/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Asm versions of Xen pv-ops, suitable for direct use.
 *
 * We only bother with direct forms (i.e., vcpu in percpu data) of the
 * operations here; the indirect forms are better handled in C.
 */

#include <asm/errno.h>
#include <asm/asm-offsets.h>
#include <asm/percpu.h>
#include <asm/processor-flags.h>
#include <asm/segment.h>
#include <asm/thread_info.h>
#include <asm/asm.h>
#include <asm/frame.h>

#include <xen/interface/xen.h>

#include <linux/init.h>
#include <linux/linkage.h>

/*
 * Enable events.  This clears the event mask and then tests the
 * pending event status; if there are pending events, enter the
 * hypervisor to get them handled.
 */
SYM_FUNC_START(xen_irq_enable_direct)
	FRAME_BEGIN
	/* Unmask events */
	movb $0, PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_mask

	/*
	 * Being preempted here doesn't matter, because preemption will
	 * itself deal with any pending interrupts.  The pending check
	 * may then end up running on the wrong CPU, but that doesn't
	 * hurt.
	 */

	/* Test for pending */
	testb $0xff, PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_pending
	jz 1f

	call check_events
1:
	FRAME_END
	ret
SYM_FUNC_END(xen_irq_enable_direct)


/*
 * Disabling events is simply a matter of making the event mask
 * non-zero.
 */
SYM_FUNC_START(xen_irq_disable_direct)
	movb $1, PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_mask
	ret
SYM_FUNC_END(xen_irq_disable_direct)

/*
 * (xen_)save_fl is used to get the current interrupt enable status.
 * Callers expect the status to be in X86_EFLAGS_IF, and other bits
 * may be set in the return value.  We take advantage of this by
 * making sure that X86_EFLAGS_IF has the right value (and other bits
 * in that byte are 0), but other bits in the return value are
 * undefined.  We have to invert the sense of the bit, because Xen and
 * x86 use opposite senses (event mask vs enable flag).
 */
SYM_FUNC_START(xen_save_fl_direct)
	testb $0xff, PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_mask
	setz %ah
	addb %ah, %ah		/* %ah = 2 if events are unmasked; since %ah
				   is bits 8..15 of %eax, this sets
				   X86_EFLAGS_IF (bit 9) in the result */
	ret
SYM_FUNC_END(xen_save_fl_direct)


/*
 * In principle the caller should pass us a value returned from
 * xen_save_fl_direct, but for robustness' sake we test only the
 * X86_EFLAGS_IF flag rather than the whole byte.  After setting the
 * interrupt mask state, this checks for unmasked, pending events and
 * enters the hypervisor to get them delivered if so.
 */
SYM_FUNC_START(xen_restore_fl_direct)
	FRAME_BEGIN
	testw $X86_EFLAGS_IF, %di
	setz PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_mask
	/*
	 * Being preempted here doesn't matter, because preemption will
	 * itself deal with any pending interrupts.  The pending check
	 * may then end up running on the wrong CPU, but that doesn't
	 * hurt.
	 */

	/*
	 * Check for unmasked and pending: this reads the pending (low)
	 * and mask (high) bytes of vcpu_info as a single word.
	 */
	cmpw $0x0001, PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_pending
	jnz 1f
	call check_events
1:
	FRAME_END
	ret
SYM_FUNC_END(xen_restore_fl_direct)
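
/*
 * For reference, the direct ops above are hand-written forms of
 * (roughly) the following C, using the percpu xen_vcpu_info copy
 * instead of chasing the xen_vcpu pointer.  This is a sketch for the
 * reader, not the actual C pv-ops:
 *
 *	xen_irq_enable:
 *		vcpu_info->evtchn_upcall_mask = 0;
 *		if (vcpu_info->evtchn_upcall_pending)
 *			check_events();
 *
 *	xen_irq_disable:
 *		vcpu_info->evtchn_upcall_mask = 1;
 *
 *	xen_save_fl:
 *		return vcpu_info->evtchn_upcall_mask ? 0 : X86_EFLAGS_IF;
 *
 *	xen_restore_fl:
 *		vcpu_info->evtchn_upcall_mask = !(flags & X86_EFLAGS_IF);
 *		if (!vcpu_info->evtchn_upcall_mask &&
 *		    vcpu_info->evtchn_upcall_pending)
 *			check_events();
 */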

/*
 * Force an event check by making a hypercall, but preserve regs
 * before making the call.  The direct ops above are registered as
 * callee-save pv-ops, so their call sites assume no registers are
 * clobbered; everything the C call could clobber must be saved here.
 */
SYM_FUNC_START(check_events)
	FRAME_BEGIN
	push %rax
	push %rcx
	push %rdx
	push %rsi
	push %rdi
	push %r8
	push %r9
	push %r10
	push %r11
	call xen_force_evtchn_callback
	pop %r11
	pop %r10
	pop %r9
	pop %r8
	pop %rdi
	pop %rsi
	pop %rdx
	pop %rcx
	pop %rax
	FRAME_END
	ret
SYM_FUNC_END(check_events)

/*
 * Xen PV guests cannot read %cr2 directly; the hypervisor saves the
 * faulting address in vcpu_info->arch.cr2 instead.
 */
SYM_FUNC_START(xen_read_cr2)
	FRAME_BEGIN
	_ASM_MOV PER_CPU_VAR(xen_vcpu), %_ASM_AX
	_ASM_MOV XEN_vcpu_info_arch_cr2(%_ASM_AX), %_ASM_AX
	FRAME_END
	ret
SYM_FUNC_END(xen_read_cr2)

SYM_FUNC_START(xen_read_cr2_direct)
	FRAME_BEGIN
	_ASM_MOV PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_arch_cr2, %_ASM_AX
	FRAME_END
	ret
SYM_FUNC_END(xen_read_cr2_direct)

/*
 * Xen pushes %rcx and %r11 on exception entry (on top of the
 * hardware-style frame); pop them before handing off to the native
 * handler.
 */
.macro xen_pv_trap name
SYM_CODE_START(xen_\name)
	pop %rcx
	pop %r11
	jmp \name
SYM_CODE_END(xen_\name)
_ASM_NOKPROBE(xen_\name)
.endm

xen_pv_trap asm_exc_divide_error
xen_pv_trap asm_xenpv_exc_debug
xen_pv_trap asm_exc_int3
xen_pv_trap asm_xenpv_exc_nmi
xen_pv_trap asm_exc_overflow
xen_pv_trap asm_exc_bounds
xen_pv_trap asm_exc_invalid_op
xen_pv_trap asm_exc_device_not_available
xen_pv_trap asm_exc_double_fault
xen_pv_trap asm_exc_coproc_segment_overrun
xen_pv_trap asm_exc_invalid_tss
xen_pv_trap asm_exc_segment_not_present
xen_pv_trap asm_exc_stack_segment
xen_pv_trap asm_exc_general_protection
xen_pv_trap asm_exc_page_fault
xen_pv_trap asm_exc_spurious_interrupt_bug
xen_pv_trap asm_exc_coprocessor_error
xen_pv_trap asm_exc_alignment_check
#ifdef CONFIG_X86_MCE
xen_pv_trap asm_exc_machine_check
#endif /* CONFIG_X86_MCE */
xen_pv_trap asm_exc_simd_coprocessor_error
#ifdef CONFIG_IA32_EMULATION
xen_pv_trap entry_INT80_compat
#endif
xen_pv_trap asm_exc_xen_hypervisor_callback

	__INIT
SYM_CODE_START(xen_early_idt_handler_array)
	i = 0
	.rept NUM_EXCEPTION_VECTORS
	pop %rcx
	pop %r11
	jmp early_idt_handler_array + i*EARLY_IDT_HANDLER_SIZE
	i = i + 1
	.fill xen_early_idt_handler_array + i*XEN_EARLY_IDT_HANDLER_SIZE - ., 1, 0xcc	/* pad with int3 */
	.endr
SYM_CODE_END(xen_early_idt_handler_array)
	__FINIT

/* Each hypercall stub in the hypercall page is 32 bytes. */
hypercall_iret = hypercall_page + __HYPERVISOR_iret * 32
/*
 * Xen64 iret frame:
 *
 *	ss
 *	rsp
 *	rflags
 *	cs
 *	rip		<-- standard iret frame
 *
 *	flags
 *
 *	rcx		}
 *	r11		}<-- pushed by hypercall page
 * rsp->rax		}
 */
SYM_CODE_START(xen_iret)
	pushq $0
	jmp hypercall_iret
SYM_CODE_END(xen_iret)
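
/*
 * Both xen_iret above and xen_sysret64 below return to the guest via
 * the iret hypercall; the word pushed just before the jump fills the
 * "flags" slot of the frame diagrammed above.  VGCF_in_syscall tells
 * Xen the guest was in a syscall, so (as we understand the interface)
 * it may take a sysret-style fast path back to the guest.
 */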

SYM_CODE_START(xen_sysret64)
	/*
	 * We're already on the usermode stack at this point, but
	 * still with the kernel gs, so we can easily switch back.
	 *
	 * tss.sp2 is scratch space.
	 */
	movq %rsp, PER_CPU_VAR(cpu_tss_rw + TSS_sp2)
	movq PER_CPU_VAR(cpu_current_top_of_stack), %rsp

	pushq $__USER_DS
	pushq PER_CPU_VAR(cpu_tss_rw + TSS_sp2)
	pushq %r11
	pushq $__USER_CS
	pushq %rcx

	pushq $VGCF_in_syscall
	jmp hypercall_iret
SYM_CODE_END(xen_sysret64)

/*
 * Xen handles syscall callbacks much like ordinary exceptions, which
 * means we have:
 * - kernel gs
 * - kernel rsp
 * - an iret-like stack frame on the stack (including rcx and r11):
 *	ss
 *	rsp
 *	rflags
 *	cs
 *	rip
 *	r11
 * rsp->rcx
 */

/* Normal 64-bit system call target */
SYM_FUNC_START(xen_syscall_target)
	popq %rcx
	popq %r11

	/*
	 * Neither Xen nor the kernel really knows what the old SS and
	 * CS were.  The kernel expects __USER_DS and __USER_CS, so
	 * report those values even though Xen will guess its own values.
	 */
	movq $__USER_DS, 4*8(%rsp)
	movq $__USER_CS, 1*8(%rsp)

	jmp entry_SYSCALL_64_after_hwframe
SYM_FUNC_END(xen_syscall_target)

#ifdef CONFIG_IA32_EMULATION

/* 32-bit compat syscall target */
SYM_FUNC_START(xen_syscall32_target)
	popq %rcx
	popq %r11

	/*
	 * Neither Xen nor the kernel really knows what the old SS and
	 * CS were.  The kernel expects __USER32_DS and __USER32_CS, so
	 * report those values even though Xen will guess its own values.
	 */
	movq $__USER32_DS, 4*8(%rsp)
	movq $__USER32_CS, 1*8(%rsp)

	jmp entry_SYSCALL_compat_after_hwframe
SYM_FUNC_END(xen_syscall32_target)

/* 32-bit compat sysenter target */
SYM_FUNC_START(xen_sysenter_target)
	/*
	 * NB: Xen is polite and clears TF from EFLAGS for us.  This
	 * means that we don't need to guard against single step
	 * exceptions here.
	 */
	popq %rcx
	popq %r11

	/*
	 * Neither Xen nor the kernel really knows what the old SS and
	 * CS were.  The kernel expects __USER32_DS and __USER32_CS, so
	 * report those values even though Xen will guess its own values.
	 */
	movq $__USER32_DS, 4*8(%rsp)
	movq $__USER32_CS, 1*8(%rsp)

	jmp entry_SYSENTER_compat_after_hwframe
SYM_FUNC_END(xen_sysenter_target)

#else /* !CONFIG_IA32_EMULATION */

SYM_FUNC_START_ALIAS(xen_syscall32_target)
SYM_FUNC_START(xen_sysenter_target)
	lea 16(%rsp), %rsp	/* strip %rcx, %r11 */
	mov $-ENOSYS, %rax	/* 32-bit compat syscalls not supported */
	pushq $0
	jmp hypercall_iret
SYM_FUNC_END(xen_sysenter_target)
SYM_FUNC_END_ALIAS(xen_syscall32_target)

#endif /* CONFIG_IA32_EMULATION */
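
/*
 * For reference, the iret-like frame the syscall/sysenter targets
 * above patch up, as offsets from %rsp after the two pops (standard
 * x86-64 iret layout; a reader's aid only, nothing here uses it):
 *
 *	4*8(%rsp)	ss	<-- rewritten with __USER_DS / __USER32_DS
 *	3*8(%rsp)	rsp
 *	2*8(%rsp)	rflags
 *	1*8(%rsp)	cs	<-- rewritten with __USER_CS / __USER32_CS
 *	0*8(%rsp)	rip
 */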