/*
 * Code for the vDSO.  This version uses the old int $0x80 method.
 */

#include <asm/dwarf2.h>
#include <asm/cpufeature.h>
#include <asm/alternative-asm.h>

/*
 * First get the common code for the sigreturn entry points.
 * This must come first.
 */
#include "sigreturn.S"
14
	.text
	.globl __kernel_vsyscall
	.type __kernel_vsyscall,@function
	ALIGN
/*
 * __kernel_vsyscall - the vDSO's 32-bit system call entry point.
 *
 * Userspace calls here instead of issuing int $0x80 directly so that
 * the fastest entry instruction the CPU supports can be used: the
 * ALTERNATIVE patch sites below substitute SYSENTER or SYSCALL when
 * the corresponding feature bit is set; otherwise execution falls
 * through to the legacy int $0x80 path.
 */
__kernel_vsyscall:
	CFI_STARTPROC
	/*
	 * Reshuffle regs so that any of the entry instructions will
	 * preserve enough state: keep copies of EDX and ECX on the user
	 * stack (the fast entry instructions do not preserve all user
	 * registers) and hand the kernel the stack pointer in ECX so it
	 * can locate those saved values.
	 */
	pushl	%edx
	CFI_ADJUST_CFA_OFFSET	4
	CFI_REL_OFFSET		edx, 0
	pushl	%ecx
	CFI_ADJUST_CFA_OFFSET	4
	CFI_REL_OFFSET		ecx, 0
	movl	%esp, %ecx	/* ECX = user ESP, pointing at saved ECX/EDX */

#ifdef CONFIG_X86_64
	/* If SYSENTER (Intel) or SYSCALL32 (AMD) is available, use it. */
	ALTERNATIVE_2 "", "sysenter", X86_FEATURE_SYSENTER32, \
	                  "syscall",  X86_FEATURE_SYSCALL32
#else
	/* 32-bit kernel: SYSENTER (X86_FEATURE_SEP) is the only fast path. */
	ALTERNATIVE "", "sysenter", X86_FEATURE_SEP
#endif

	/*
	 * Enter using int $0x80.  ECX currently holds the stack pointer
	 * (set up for the fast paths above), but int $0x80 passes ECX to
	 * the kernel as a syscall argument, so reload the caller's value
	 * saved by the pushl above.
	 */
	movl	(%esp), %ecx
	int	$0x80
/*
 * NOTE(review): exported label — presumably so the kernel can resume
 * userspace at this known address after any of the entry variants;
 * confirm against the kernel-side entry code that references it.
 */
GLOBAL(int80_landing_pad)

	/* Restore ECX and EDX in case they were clobbered. */
	popl	%ecx
	CFI_RESTORE		ecx
	CFI_ADJUST_CFA_OFFSET	-4
	popl	%edx
	CFI_RESTORE		edx
	CFI_ADJUST_CFA_OFFSET	-4
	ret
	CFI_ENDPROC

	.size __kernel_vsyscall,.-__kernel_vsyscall
	.previous
58