/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Asm versions of Xen pv-ops, suitable for direct use.
 *
 * We only bother with direct forms (i.e., vcpu in percpu data) of the
 * operations here; the indirect forms are better handled in C.
 */

#include <asm/errno.h>
#include <asm/asm-offsets.h>
#include <asm/percpu.h>
#include <asm/processor-flags.h>
#include <asm/segment.h>
#include <asm/thread_info.h>
#include <asm/asm.h>
#include <asm/frame.h>

#include <xen/interface/xen.h>

#include <linux/init.h>
#include <linux/linkage.h>

/*
 * Enable events.  This clears the event mask, then checks whether any
 * events are pending.  If there are pending events, enter the
 * hypervisor to get them handled.
 */
SYM_FUNC_START(xen_irq_enable_direct)
	FRAME_BEGIN
	/* Unmask events */
	movb $0, PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_mask

	/*
	 * Being preempted here doesn't matter, because preemption itself
	 * will deal with any pending interrupts.  The pending check may
	 * end up running on the wrong CPU, but that doesn't hurt.
	 */

	/* Test for pending */
	testb $0xff, PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_pending
	jz 1f

	call check_events
1:
	FRAME_END
	ret
SYM_FUNC_END(xen_irq_enable_direct)
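
/*
 * Roughly equivalent C, as a sketch only (the percpu accessor and the
 * check_events() call are illustrative shorthand, not the kernel's
 * literal pv-ops plumbing; field names follow struct vcpu_info):
 *
 *	void xen_irq_enable(void)
 *	{
 *		struct vcpu_info *vi = this_cpu_ptr(&xen_vcpu_info);
 *
 *		vi->evtchn_upcall_mask = 0;	// unmask events
 *		barrier();
 *		if (vi->evtchn_upcall_pending)	// anything pending?
 *			check_events();		// hypercall to deliver them
 *	}
 */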


/*
 * Disabling events is simply a matter of making the event mask
 * non-zero.
 */
SYM_FUNC_START(xen_irq_disable_direct)
	movb $1, PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_mask
	ret
SYM_FUNC_END(xen_irq_disable_direct)

/*
 * (xen_)save_fl is used to get the current interrupt enable status.
 * Callers expect the status to be in X86_EFLAGS_IF, and other bits
 * may be set in the return value.  We take advantage of this by
 * making sure that X86_EFLAGS_IF has the right value (and other bits
 * in that byte are 0), but other bits in the return value are
 * undefined.  We need to toggle the state of the bit, because Xen and
 * x86 use opposite senses (mask vs enable).
 */
SYM_FUNC_START(xen_save_fl_direct)
	testb $0xff, PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_mask
	setz %ah	/* %ah = 1 if events are unmasked, 0 if masked */
	addb %ah, %ah	/* shift into bit 9 of %rax == X86_EFLAGS_IF */
	ret
SYM_FUNC_END(xen_save_fl_direct)
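
/*
 * The idea, as a C sketch (hedged: not the real pv_ops hook, and only
 * the X86_EFLAGS_IF bit of the return value is meaningful):
 *
 *	unsigned long xen_save_fl(void)
 *	{
 *		return this_cpu_read(xen_vcpu_info.evtchn_upcall_mask)
 *			? 0 : X86_EFLAGS_IF;
 *	}
 */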

/*
 * Force an event check by making a hypercall, but preserve regs
 * before making the call: everything pushed here is exactly the set
 * of registers the C calling convention would otherwise allow
 * xen_force_evtchn_callback() to clobber.
 */
SYM_FUNC_START(check_events)
	FRAME_BEGIN
	push %rax
	push %rcx
	push %rdx
	push %rsi
	push %rdi
	push %r8
	push %r9
	push %r10
	push %r11
	call xen_force_evtchn_callback
	pop %r11
	pop %r10
	pop %r9
	pop %r8
	pop %rdi
	pop %rsi
	pop %rdx
	pop %rcx
	pop %rax
	FRAME_END
	ret
SYM_FUNC_END(check_events)

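/*
 * Read the faulting address after a page fault.  Under Xen PV the
 * hypervisor stashes the guest's %cr2 in vcpu_info->arch.cr2, so we
 * fetch it from there instead of reading %cr2 itself: xen_read_cr2
 * goes through the xen_vcpu pointer, while the _direct variant uses
 * the percpu vcpu_info copy.
 */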
SYM_FUNC_START(xen_read_cr2)
	FRAME_BEGIN
	_ASM_MOV PER_CPU_VAR(xen_vcpu), %_ASM_AX
	_ASM_MOV XEN_vcpu_info_arch_cr2(%_ASM_AX), %_ASM_AX
	FRAME_END
	ret
SYM_FUNC_END(xen_read_cr2);

SYM_FUNC_START(xen_read_cr2_direct)
	FRAME_BEGIN
	_ASM_MOV PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_arch_cr2, %_ASM_AX
	FRAME_END
	ret
SYM_FUNC_END(xen_read_cr2_direct);

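/*
 * PV exception entry stubs.  Xen's trap bounce delivers exceptions
 * with %rcx and %r11 pushed on top of the usual hardware-style iret
 * frame, so each stub pops them off and then tail-jumps to the native
 * asm_exc_* (or xenpv-specific) entry point, which expects a plain
 * frame.
 */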
.macro xen_pv_trap name
SYM_CODE_START(xen_\name)
	pop %rcx
	pop %r11
	jmp  \name
SYM_CODE_END(xen_\name)
_ASM_NOKPROBE(xen_\name)
.endm

xen_pv_trap asm_exc_divide_error
xen_pv_trap asm_xenpv_exc_debug
xen_pv_trap asm_exc_int3
xen_pv_trap asm_xenpv_exc_nmi
xen_pv_trap asm_exc_overflow
xen_pv_trap asm_exc_bounds
xen_pv_trap asm_exc_invalid_op
xen_pv_trap asm_exc_device_not_available
xen_pv_trap asm_xenpv_exc_double_fault
xen_pv_trap asm_exc_coproc_segment_overrun
xen_pv_trap asm_exc_invalid_tss
xen_pv_trap asm_exc_segment_not_present
xen_pv_trap asm_exc_stack_segment
xen_pv_trap asm_exc_general_protection
xen_pv_trap asm_exc_page_fault
xen_pv_trap asm_exc_spurious_interrupt_bug
xen_pv_trap asm_exc_coprocessor_error
xen_pv_trap asm_exc_alignment_check
#ifdef CONFIG_X86_MCE
xen_pv_trap asm_xenpv_exc_machine_check
#endif /* CONFIG_X86_MCE */
xen_pv_trap asm_exc_simd_coprocessor_error
#ifdef CONFIG_IA32_EMULATION
xen_pv_trap entry_INT80_compat
#endif
xen_pv_trap asm_exc_xen_unknown_trap
xen_pv_trap asm_exc_xen_hypervisor_callback

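/*
 * Early (boot-time) exception entry points.  Same trick as the
 * xen_pv_trap stubs above: drop the %rcx/%r11 slots Xen adds and jump
 * to the matching native early_idt_handler_array entry.  Each stub is
 * padded with 0xcc to XEN_EARLY_IDT_HANDLER_SIZE so the array can be
 * indexed by vector number.
 */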
	__INIT
SYM_CODE_START(xen_early_idt_handler_array)
	i = 0
	.rept NUM_EXCEPTION_VECTORS
	pop %rcx
	pop %r11
	jmp early_idt_handler_array + i*EARLY_IDT_HANDLER_SIZE
	i = i + 1
	.fill xen_early_idt_handler_array + i*XEN_EARLY_IDT_HANDLER_SIZE - ., 1, 0xcc
	.endr
SYM_CODE_END(xen_early_idt_handler_array)
	__FINIT

hypercall_iret = hypercall_page + __HYPERVISOR_iret * 32
/*
 * Xen64 iret frame:
 *
 *	ss
 *	rsp
 *	rflags
 *	cs
 *	rip		<-- standard iret frame
 *
 *	flags
 *
 *	rcx		}
 *	r11		}<-- pushed by hypercall page
 * rsp->rax		}
 */
SYM_CODE_START(xen_iret)
	pushq $0
	jmp hypercall_iret
SYM_CODE_END(xen_iret)
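
/*
 * The pushq $0 above supplies the 'flags' word shown in the frame
 * layout; zero means a plain return (in particular, VGCF_in_syscall
 * is not set).  The iret hypercall then consumes the whole frame,
 * including the %rcx/%r11/%rax slots, to rebuild guest state.
 */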

/*
 * Xen handles syscall callbacks much like ordinary exceptions, which
 * means we have:
 * - kernel gs
 * - kernel rsp
 * - an iret-like stack frame on the stack (including rcx and r11):
 *	ss
 *	rsp
 *	rflags
 *	cs
 *	rip
 *	r11
 * rsp->rcx
 */

/* Normal 64-bit system call target */
SYM_FUNC_START(xen_syscall_target)
	popq %rcx
	popq %r11

	/*
	 * Neither Xen nor the kernel really knows what the old SS and
	 * CS were.  The kernel expects __USER_DS and __USER_CS, so
	 * report those values even though Xen will guess its own values.
	 */
	movq $__USER_DS, 4*8(%rsp)	/* SS slot of the iret frame */
	movq $__USER_CS, 1*8(%rsp)	/* CS slot of the iret frame */

	jmp entry_SYSCALL_64_after_hwframe
SYM_FUNC_END(xen_syscall_target)

#ifdef CONFIG_IA32_EMULATION

/* 32-bit compat syscall target */
SYM_FUNC_START(xen_syscall32_target)
	popq %rcx
	popq %r11

	/*
	 * Neither Xen nor the kernel really knows what the old SS and
	 * CS were.  The kernel expects __USER32_DS and __USER32_CS, so
	 * report those values even though Xen will guess its own values.
	 */
	movq $__USER32_DS, 4*8(%rsp)
	movq $__USER32_CS, 1*8(%rsp)

	jmp entry_SYSCALL_compat_after_hwframe
SYM_FUNC_END(xen_syscall32_target)

/* 32-bit compat sysenter target */
SYM_FUNC_START(xen_sysenter_target)
	/*
	 * NB: Xen is polite and clears TF from EFLAGS for us.  This means
	 * that we don't need to guard against single step exceptions here.
	 */
	popq %rcx
	popq %r11

	/*
	 * Neither Xen nor the kernel really knows what the old SS and
	 * CS were.  The kernel expects __USER32_DS and __USER32_CS, so
	 * report those values even though Xen will guess its own values.
	 */
	movq $__USER32_DS, 4*8(%rsp)
	movq $__USER32_CS, 1*8(%rsp)

	jmp entry_SYSENTER_compat_after_hwframe
SYM_FUNC_END(xen_sysenter_target)

#else /* !CONFIG_IA32_EMULATION */

SYM_FUNC_START_ALIAS(xen_syscall32_target)
SYM_FUNC_START(xen_sysenter_target)
	lea 16(%rsp), %rsp	/* strip %rcx, %r11 */
	mov $-ENOSYS, %rax	/* no compat support: fail the call */
	pushq $0
	jmp hypercall_iret
SYM_FUNC_END(xen_sysenter_target)
SYM_FUNC_END_ALIAS(xen_syscall32_target)

#endif	/* CONFIG_IA32_EMULATION */
