1/*
2 * AT_SYSINFO entry point
3*/
4
5#include <linux/linkage.h>
6#include <asm/dwarf2.h>
7#include <asm/cpufeatures.h>
8#include <asm/alternative-asm.h>
9
	.text
	.globl __kernel_vsyscall
	.type __kernel_vsyscall,@function
	ALIGN
__kernel_vsyscall:
	CFI_STARTPROC
	/*
	 * Reshuffle regs so that all of any of the entry instructions
	 * will preserve enough state.
	 *
	 * A really nice entry sequence would be:
	 *  pushl %edx
	 *  pushl %ecx
	 *  movl  %esp, %ecx
	 *
	 * Unfortunately, naughty Android versions between July and December
	 * 2015 actually hardcode the traditional Linux SYSENTER entry
	 * sequence.  That is severely broken for a number of reasons (ask
	 * anyone with an AMD CPU, for example).  Nonetheless, we try to keep
	 * it working approximately as well as it ever worked.
	 *
	 * This link may elucidate some of the history:
	 *   https://android-review.googlesource.com/#/q/Iac3295376d61ef83e713ac9b528f3b50aa780cd7
	 * personally, I find it hard to understand what's going on there.
	 *
	 * Note to future user developers: DO NOT USE SYSENTER IN YOUR CODE.
	 * Execute an indirect call to the address in the AT_SYSINFO auxv
	 * entry.  That is the ONLY correct way to make a fast 32-bit system
	 * call on Linux.  (Open-coding int $0x80 is also fine, but it's
	 * slow.)
	 */

	/*
	 * Save the three registers that the fast-path entry sequences
	 * disturb: %ecx and %edx may be clobbered by the SYSENTER/SYSCALL
	 * round trip (see the matching restore below), and %ebp is
	 * repurposed as a staging register by the patched-in sequences.
	 * Each push is paired with CFI annotations so unwinders can still
	 * recover the caller's register values from the stack.
	 */
	pushl	%ecx
	CFI_ADJUST_CFA_OFFSET	4
	CFI_REL_OFFSET		ecx, 0
	pushl	%edx
	CFI_ADJUST_CFA_OFFSET	4
	CFI_REL_OFFSET		edx, 0
	pushl	%ebp
	CFI_ADJUST_CFA_OFFSET	4
	CFI_REL_OFFSET		ebp, 0

	/*
	 * The two fast entry sequences, patched in at boot (see below).
	 * SYSENTER: stash the user stack pointer in %ebp — the traditional
	 * Linux SYSENTER convention, since SYSENTER itself does not save
	 * %esp.  SYSCALL: stash %ecx in %ebp — NOTE(review): presumably
	 * because 32-bit SYSCALL overwrites %ecx with the return address,
	 * so the kernel recovers the original value from %ebp; confirm
	 * against the kernel's compat entry code.
	 */
	#define SYSENTER_SEQUENCE	"movl %esp, %ebp; sysenter"
	#define SYSCALL_SEQUENCE	"movl %ecx, %ebp; syscall"

#ifdef CONFIG_X86_64
	/*
	 * If SYSENTER (Intel) or SYSCALL32 (AMD) is available, use it.
	 * ALTERNATIVE_2 starts this region as NOPs and binary-patches in
	 * exactly one sequence at boot based on the detected CPU feature;
	 * with neither feature present we fall through to int $0x80.
	 */
	ALTERNATIVE_2 "", SYSENTER_SEQUENCE, X86_FEATURE_SYSENTER32, \
	                  SYSCALL_SEQUENCE,  X86_FEATURE_SYSCALL32
#else
	/* 32-bit kernels only have the SYSENTER fast path. */
	ALTERNATIVE "", SYSENTER_SEQUENCE, X86_FEATURE_SEP
#endif

	/* Enter using int $0x80 (slow path / no fast entry available). */
	int	$0x80
	/*
	 * NOTE(review): exported label — the fast entries do not save a
	 * return address, so the kernel appears to resume userspace here
	 * after SYSENTER/SYSCALL as well; confirm against the kernel-side
	 * entry code that references int80_landing_pad.
	 */
GLOBAL(int80_landing_pad)

	/*
	 * Restore EDX and ECX in case they were clobbered.  EBP is not
	 * clobbered (the kernel restores it), but it's cleaner and
	 * probably faster to pop it than to adjust ESP using addl.
	 * The CFI annotations mirror the prologue so unwind state stays
	 * consistent at every instruction boundary.
	 */
	popl	%ebp
	CFI_RESTORE		ebp
	CFI_ADJUST_CFA_OFFSET	-4
	popl	%edx
	CFI_RESTORE		edx
	CFI_ADJUST_CFA_OFFSET	-4
	popl	%ecx
	CFI_RESTORE		ecx
	CFI_ADJUST_CFA_OFFSET	-4
	ret
	CFI_ENDPROC

	.size __kernel_vsyscall,.-__kernel_vsyscall
	.previous
85