1b2441318SGreg Kroah-Hartman /* SPDX-License-Identifier: GPL-2.0 */
21965aae3SH. Peter Anvin #ifndef _ASM_X86_ELF_H
31965aae3SH. Peter Anvin #define _ASM_X86_ELF_H
4bb898558SAl Viro
5bb898558SAl Viro /*
6bb898558SAl Viro * ELF register definitions..
7bb898558SAl Viro */
8dfb09f9bSBorislav Petkov #include <linux/thread_info.h>
9bb898558SAl Viro
10bb898558SAl Viro #include <asm/ptrace.h>
11bb898558SAl Viro #include <asm/user.h>
12bb898558SAl Viro #include <asm/auxvec.h>
13824eea38SChang S. Bae #include <asm/fsgsbase.h>
14bb898558SAl Viro
/* One ELF general register as stored in a core-dump register note. */
typedef unsigned long elf_greg_t;

/* The dumped register set mirrors struct user_regs_struct exactly. */
#define ELF_NGREG (sizeof(struct user_regs_struct) / sizeof(elf_greg_t))
typedef elf_greg_t elf_gregset_t[ELF_NGREG];

/* FPU state layout used for the core-dump FP register note. */
typedef struct user_i387_struct elf_fpregset_t;
21bb898558SAl Viro
22bb898558SAl Viro #ifdef __i386__
23bb898558SAl Viro
/* i386 relocation types (SysV i386 psABI) */
#define R_386_NONE 0		/* No reloc */
#define R_386_32 1		/* Direct 32 bit */
#define R_386_PC32 2		/* PC relative 32 bit signed */
#define R_386_GOT32 3		/* 32 bit GOT entry */
#define R_386_PLT32 4		/* 32 bit PLT address */
#define R_386_COPY 5		/* Copy symbol at runtime */
#define R_386_GLOB_DAT 6	/* Create GOT entry */
#define R_386_JMP_SLOT 7	/* Create PLT entry */
#define R_386_RELATIVE 8	/* Adjust by program base */
#define R_386_GOTOFF 9		/* Offset relative to GOT */
#define R_386_GOTPC 10		/* PC relative offset to GOT */
#define R_386_NUM 11		/* Number of types defined here */

/*
 * These are used to set parameters in the core dumps.
 */
#define ELF_CLASS ELFCLASS32	/* 32-bit objects */
#define ELF_DATA ELFDATA2LSB	/* little-endian */
#define ELF_ARCH EM_386
43bb898558SAl Viro
44bb898558SAl Viro #else
45bb898558SAl Viro
46bb898558SAl Viro /* x86-64 relocation types */
#define R_X86_64_NONE 0 /* No reloc */
#define R_X86_64_64 1 /* Direct 64 bit */
#define R_X86_64_PC32 2 /* PC relative 32 bit signed */
#define R_X86_64_GOT32 3 /* 32 bit GOT entry */
#define R_X86_64_PLT32 4 /* 32 bit PLT address */
#define R_X86_64_COPY 5 /* Copy symbol at runtime */
#define R_X86_64_GLOB_DAT 6 /* Create GOT entry */
#define R_X86_64_JUMP_SLOT 7 /* Create PLT entry */
#define R_X86_64_RELATIVE 8 /* Adjust by program base */
#define R_X86_64_GOTPCREL 9 /* 32 bit signed pc relative
				offset to GOT */
#define R_X86_64_32 10 /* Direct 32 bit zero extended */
#define R_X86_64_32S 11 /* Direct 32 bit sign extended */
#define R_X86_64_16 12 /* Direct 16 bit zero extended */
#define R_X86_64_PC16 13 /* 16 bit sign extended pc relative */
#define R_X86_64_8 14 /* Direct 8 bit sign extended */
#define R_X86_64_PC8 15 /* 8 bit sign extended pc relative */
/* psABI types 16-23 are deliberately not defined here. */
#define R_X86_64_PC64 24 /* Place relative 64-bit signed */

/*
 * These are used to set parameters in the core dumps.
 */
#define ELF_CLASS ELFCLASS64	/* 64-bit objects */
#define ELF_DATA ELFDATA2LSB	/* little-endian */
#define ELF_ARCH EM_X86_64
72bb898558SAl Viro
73bb898558SAl Viro #endif
74bb898558SAl Viro
75bb898558SAl Viro #include <asm/vdso.h>
76bb898558SAl Viro
773d7ee969SAndy Lutomirski #ifdef CONFIG_X86_64
783d7ee969SAndy Lutomirski extern unsigned int vdso64_enabled;
793d7ee969SAndy Lutomirski #endif
80ab8b82eeSBrian Gerst #if defined(CONFIG_X86_32) || defined(CONFIG_IA32_EMULATION)
813d7ee969SAndy Lutomirski extern unsigned int vdso32_enabled;
823d7ee969SAndy Lutomirski #endif
83bb898558SAl Viro
84bb898558SAl Viro /*
85bb898558SAl Viro * This is used to ensure we don't load something for the wrong architecture.
86bb898558SAl Viro */
/* Accept ia32 objects: EM_386 plus the legacy EM_486 machine type. */
#define elf_check_arch_ia32(x) \
	(((x)->e_machine == EM_386) || ((x)->e_machine == EM_486))
89bb898558SAl Viro
90bb898558SAl Viro #include <asm/processor.h>
91bb898558SAl Viro
92bb898558SAl Viro #ifdef CONFIG_X86_32
93bb898558SAl Viro #include <asm/desc.h>
94bb898558SAl Viro
95bb898558SAl Viro #define elf_check_arch(x) elf_check_arch_ia32(x)
96bb898558SAl Viro
/* SVR4/i386 ABI (pages 3-31, 3-32) says that when the program starts %edx
   contains a pointer to a function which might be registered using `atexit'.
   This provides a means for the dynamic linker to call DT_FINI functions for
   shared libraries that have been loaded before the code runs.

   A value of 0 tells us we have no such handler.

   We might as well make sure everything else is cleared too (except for %esp),
   just to make things more deterministic.
 */
/* 32-bit exec-time register init: zero the GPRs (sp is set up elsewhere). */
#define ELF_PLAT_INIT(_r, load_addr) \
	do { \
	_r->bx = 0; _r->cx = 0; _r->dx = 0; \
	_r->si = 0; _r->di = 0; _r->bp = 0; \
	_r->ax = 0; \
} while (0)
113bb898558SAl Viro
114bb898558SAl Viro /*
115bb898558SAl Viro * regs is struct pt_regs, pr_reg is elf_gregset_t (which is
116bb898558SAl Viro * now struct_user_regs, they are different)
117bb898558SAl Viro */
118bb898558SAl Viro
/*
 * Fill an elf_gregset_t (user_regs_struct order) from pt_regs for a
 * core dump.  The user gs is read live via savesegment() rather than
 * from pt_regs, so this assumes current is the task being dumped.
 *
 * NOTE(review): the trailing semicolon after "while (0)" is unusual for
 * a do/while macro, but callers appear to rely on it by invoking the
 * macro without their own semicolon -- confirm before removing.
 */
#define ELF_CORE_COPY_REGS(pr_reg, regs) \
do { \
	pr_reg[0] = regs->bx; \
	pr_reg[1] = regs->cx; \
	pr_reg[2] = regs->dx; \
	pr_reg[3] = regs->si; \
	pr_reg[4] = regs->di; \
	pr_reg[5] = regs->bp; \
	pr_reg[6] = regs->ax; \
	pr_reg[7] = regs->ds; \
	pr_reg[8] = regs->es; \
	pr_reg[9] = regs->fs; \
	savesegment(gs, pr_reg[10]); \
	pr_reg[11] = regs->orig_ax; \
	pr_reg[12] = regs->ip; \
	pr_reg[13] = regs->cs; \
	pr_reg[14] = regs->flags; \
	pr_reg[15] = regs->sp; \
	pr_reg[16] = regs->ss; \
} while (0);

/* AT_PLATFORM string handed to userspace (e.g. "i686"). */
#define ELF_PLATFORM (utsname()->machine)
/* No 64-bit personality exists on a 32-bit kernel. */
#define set_personality_64bit() do { } while (0)
142bb898558SAl Viro
143bb898558SAl Viro #else /* CONFIG_X86_32 */
144bb898558SAl Viro
145bb898558SAl Viro /*
146bb898558SAl Viro * This is used to ensure we don't load something for the wrong architecture.
147bb898558SAl Viro */
/* Native loader: only accept x86-64 objects. */
#define elf_check_arch(x) \
	((x)->e_machine == EM_X86_64)

/* Compat loader: accept ia32 objects, plus x32 (EM_X86_64) when enabled. */
#define compat_elf_check_arch(x) \
	(elf_check_arch_ia32(x) || \
	 (IS_ENABLED(CONFIG_X86_X32_ABI) && (x)->e_machine == EM_X86_64))
154d1a797f3SH. Peter Anvin
elf_common_init(struct thread_struct * t,struct pt_regs * regs,const u16 ds)155d1a797f3SH. Peter Anvin static inline void elf_common_init(struct thread_struct *t,
156d1a797f3SH. Peter Anvin struct pt_regs *regs, const u16 ds)
157d1a797f3SH. Peter Anvin {
158bb898558SAl Viro /* ax gets execve's return value. */
159bb898558SAl Viro /*regs->ax = */ regs->bx = regs->cx = regs->dx = 0;
160bb898558SAl Viro regs->si = regs->di = regs->bp = 0;
161bb898558SAl Viro regs->r8 = regs->r9 = regs->r10 = regs->r11 = 0;
1627bcdea4dSAndy Lutomirski regs->r12 = regs->r13 = regs->r14 = regs->r15 = 0;
1637bcdea4dSAndy Lutomirski t->fsbase = t->gsbase = 0;
1647bcdea4dSAndy Lutomirski t->fsindex = t->gsindex = 0;
165bb898558SAl Viro t->ds = t->es = ds;
1667bcdea4dSAndy Lutomirski }
167296f781aSAndy Lutomirski
/* Native 64-bit exec: clear registers; ds/es get the null selector (0). */
#define ELF_PLAT_INIT(_r, load_addr) \
	elf_common_init(&current->thread, _r, 0)

/* Compat (ia32/x32) exec: same, but ds/es get __USER_DS. */
#define COMPAT_ELF_PLAT_INIT(regs, load_addr) \
	elf_common_init(&current->thread, regs, __USER_DS)

/* x32 binaries also carry EM_X86_64; the bool distinguishes them from ia32. */
void compat_start_thread(struct pt_regs *regs, u32 new_ip, u32 new_sp, bool x32);
#define COMPAT_START_THREAD(ex, regs, new_ip, new_sp) \
	compat_start_thread(regs, new_ip, new_sp, ex->e_machine == EM_X86_64)

void set_personality_ia32(bool);
#define COMPAT_SET_PERSONALITY(ex) \
	set_personality_ia32((ex).e_machine == EM_X86_64)

/* Fixed AT_PLATFORM string for compat tasks. */
#define COMPAT_ELF_PLATFORM ("i686")
183d1a797f3SH. Peter Anvin
184d1a797f3SH. Peter Anvin /*
185bb898558SAl Viro * regs is struct pt_regs, pr_reg is elf_gregset_t (which is
186bb898558SAl Viro * now struct_user_regs, they are different). Assumes current is the process
187bb898558SAl Viro * getting dumped.
188bb898558SAl Viro */
189bb898558SAl Viro
/*
 * Fill an elf_gregset_t from pt_regs for a 64-bit core dump.  The
 * segment selectors and fs/gs bases are read from the live CPU state
 * (see the comment above: assumes current is the task being dumped).
 *
 * NOTE(review): the trailing semicolon after "while (0)" is unusual for
 * a do/while macro, but callers appear to rely on it by invoking the
 * macro without their own semicolon -- confirm before removing.
 */
#define ELF_CORE_COPY_REGS(pr_reg, regs) \
do { \
	unsigned v; \
	(pr_reg)[0] = (regs)->r15; \
	(pr_reg)[1] = (regs)->r14; \
	(pr_reg)[2] = (regs)->r13; \
	(pr_reg)[3] = (regs)->r12; \
	(pr_reg)[4] = (regs)->bp; \
	(pr_reg)[5] = (regs)->bx; \
	(pr_reg)[6] = (regs)->r11; \
	(pr_reg)[7] = (regs)->r10; \
	(pr_reg)[8] = (regs)->r9; \
	(pr_reg)[9] = (regs)->r8; \
	(pr_reg)[10] = (regs)->ax; \
	(pr_reg)[11] = (regs)->cx; \
	(pr_reg)[12] = (regs)->dx; \
	(pr_reg)[13] = (regs)->si; \
	(pr_reg)[14] = (regs)->di; \
	(pr_reg)[15] = (regs)->orig_ax; \
	(pr_reg)[16] = (regs)->ip; \
	(pr_reg)[17] = (regs)->cs; \
	(pr_reg)[18] = (regs)->flags; \
	(pr_reg)[19] = (regs)->sp; \
	(pr_reg)[20] = (regs)->ss; \
	(pr_reg)[21] = x86_fsbase_read_cpu(); \
	(pr_reg)[22] = x86_gsbase_read_cpu_inactive(); \
	asm("movl %%ds,%0" : "=r" (v)); (pr_reg)[23] = v; \
	asm("movl %%es,%0" : "=r" (v)); (pr_reg)[24] = v; \
	asm("movl %%fs,%0" : "=r" (v)); (pr_reg)[25] = v; \
	asm("movl %%gs,%0" : "=r" (v)); (pr_reg)[26] = v; \
} while (0);

/* I'm not sure if we can use '-' here */
#define ELF_PLATFORM ("x86_64")
extern void set_personality_64bit(void);
/* Sysctl knob: force ADDR_LIMIT_3GB personality for 32-bit binaries. */
extern int force_personality32;
226bb898558SAl Viro
227bb898558SAl Viro #endif /* !CONFIG_X86_32 */
228bb898558SAl Viro
/* Core dumps go through the regset interface, not per-arch copy macros. */
#define CORE_DUMP_USE_REGSET
#define ELF_EXEC_PAGESIZE 4096

/*
 * This is the base location for PIE (ET_DYN with INTERP) loads. On
 * 64-bit, this is above 4GB to leave the entire 32-bit address
 * space open for things that want to use the area for 32-bit pointers.
 */
#define ELF_ET_DYN_BASE (mmap_is_ia32() ? 0x000400000UL : \
						  (DEFAULT_MAP_WINDOW / 3 * 2))

/* This yields a mask that user programs can use to figure out what
   instruction set this CPU supports. This could be done in user space,
   but it's not easy, and we've already done it here. */

/* AT_HWCAP: the raw CPUID leaf 1 EDX feature bits. */
#define ELF_HWCAP (boot_cpu_data.x86_capability[CPUID_1_EDX])

extern u32 elf_hwcap2;

/*
 * HWCAP2 supplies mask with kernel enabled CPU features, so that
 * the application can discover that it can safely use them.
 * The bits are defined in uapi/asm/hwcap2.h.
 */
#define ELF_HWCAP2 (elf_hwcap2)

/* This yields a string that ld.so will use to load implementation
   specific libraries for optimization. This is more specific in
   intent than poking at uname or /proc/cpuinfo.

   For the moment, we have only optimizations for the Intel generations,
   but that could change... */

#define SET_PERSONALITY(ex) set_personality_64bit()
263bb898558SAl Viro
264bb898558SAl Viro /*
265bb898558SAl Viro * An executable for which elf_read_implies_exec() returns TRUE will
266bb898558SAl Viro * have the READ_IMPLIES_EXEC personality flag set automatically.
267bb898558SAl Viro *
268bb898558SAl Viro * The decision process for determining the results are:
269bb898558SAl Viro *
270bb898558SAl Viro * CPU: | lacks NX* | has NX, ia32 | has NX, x86_64 |
2719d9e435fSKees Cook * ELF: | | | |
2729d9e435fSKees Cook * ---------------------|------------|------------------|----------------|
2739d9e435fSKees Cook * missing PT_GNU_STACK | exec-all | exec-all | exec-none |
274c681df88SIngo Molnar * PT_GNU_STACK == RWX | exec-stack | exec-stack | exec-stack |
275c681df88SIngo Molnar * PT_GNU_STACK == RW | exec-none | exec-none | exec-none |
2769d9e435fSKees Cook *
277c681df88SIngo Molnar * exec-all : all PROT_READ user mappings are executable, except when
278c681df88SIngo Molnar * backed by files on a noexec-filesystem.
279c681df88SIngo Molnar * exec-none : only PROT_EXEC user mappings are executable.
2809d9e435fSKees Cook * exec-stack: only the stack and PROT_EXEC user mappings are executable.
2819d9e435fSKees Cook *
2829d9e435fSKees Cook * *this column has no architectural effect: NX markings are ignored by
2839d9e435fSKees Cook * hardware, but may have behavioral effects when "wants X" collides with
28412230611SKees Cook * "cannot be X" constraints in memory permission flags, as in
2859d9e435fSKees Cook * https://lkml.kernel.org/r/20190418055759.GA3155@mellanox.com
2869d9e435fSKees Cook *
2879d9e435fSKees Cook */
/* Per the table above: READ_IMPLIES_EXEC only for ia32 with no PT_GNU_STACK. */
#define elf_read_implies_exec(ex, executable_stack) \
	(mmap_is_ia32() && executable_stack == EXSTACK_DEFAULT)

struct task_struct;

/*
 * ia32 auxv entries: AT_SYSINFO/AT_SYSINFO_EHDR only when a vdso is
 * mapped; AT_MINSIGSTKSZ unconditionally.
 */
#define ARCH_DLINFO_IA32 \
do { \
	if (VDSO_CURRENT_BASE) { \
		NEW_AUX_ENT(AT_SYSINFO, VDSO_ENTRY); \
		NEW_AUX_ENT(AT_SYSINFO_EHDR, VDSO_CURRENT_BASE); \
	} \
	NEW_AUX_ENT(AT_MINSIGSTKSZ, get_sigframe_size()); \
} while (0)
301bb898558SAl Viro
302bb898558SAl Viro /*
3031c33bb05SChang S. Bae * True on X86_32 or when emulating IA32 on X86_64
304bb898558SAl Viro */
mmap_is_ia32(void)305bb898558SAl Viro static inline int mmap_is_ia32(void)
3068f3e474fSDmitry Safonov {
3078f3e474fSDmitry Safonov return IS_ENABLED(CONFIG_X86_32) ||
3088f3e474fSDmitry Safonov (IS_ENABLED(CONFIG_COMPAT) &&
3098f3e474fSDmitry Safonov test_thread_flag(TIF_ADDR32));
3108f3e474fSDmitry Safonov }
3118f3e474fSDmitry Safonov
extern unsigned long task_size_32bit(void);
extern unsigned long task_size_64bit(int full_addr_space);
extern unsigned long get_mmap_base(int is_legacy);
extern bool mmap_address_hint_valid(unsigned long addr, unsigned long len);
extern unsigned long get_sigframe_size(void);

#ifdef CONFIG_X86_32

/* 0x7ff pages of stack randomization (8MB); same for every 32-bit task. */
#define __STACK_RND_MASK(is32bit) (0x7ff)
#define STACK_RND_MASK (0x7ff)

#define ARCH_DLINFO ARCH_DLINFO_IA32

/* update AT_VECTOR_SIZE_ARCH if the number of NEW_AUX_ENT entries changes */
32680938332SMichal Hocko
3273d7ee969SAndy Lutomirski #else /* CONFIG_X86_32 */
328bb898558SAl Viro
329bb898558SAl Viro /* 1GB for 64bit, 8MB for 32bit */
/* 1GB for 64bit, 8MB for 32bit */
#define __STACK_RND_MASK(is32bit) ((is32bit) ? 0x7ff : 0x3fffff)
#define STACK_RND_MASK __STACK_RND_MASK(mmap_is_ia32())

/* Native 64-bit auxv: the vdso (when enabled) plus AT_MINSIGSTKSZ. */
#define ARCH_DLINFO \
do { \
	if (vdso64_enabled) \
		NEW_AUX_ENT(AT_SYSINFO_EHDR, \
			    (unsigned long __force)current->mm->context.vdso); \
	NEW_AUX_ENT(AT_MINSIGSTKSZ, get_sigframe_size()); \
} while (0)

/* As a historical oddity, the x32 and x86_64 vDSOs are controlled together. */
#define ARCH_DLINFO_X32 \
do { \
	if (vdso64_enabled) \
		NEW_AUX_ENT(AT_SYSINFO_EHDR, \
			    (unsigned long __force)current->mm->context.vdso); \
	NEW_AUX_ENT(AT_MINSIGSTKSZ, get_sigframe_size()); \
} while (0)

#define AT_SYSINFO 32

/* Pick the auxv flavour by the compat binary's machine type (x32 vs ia32). */
#define COMPAT_ARCH_DLINFO \
if (exec->e_machine == EM_X86_64) \
	ARCH_DLINFO_X32; \
else if (IS_ENABLED(CONFIG_IA32_EMULATION)) \
	ARCH_DLINFO_IA32

#define COMPAT_ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x1000000)
35985f2ada7SAl Viro
3603d7ee969SAndy Lutomirski #endif /* !CONFIG_X86_32 */
361bb898558SAl Viro
/* Base address of the current task's vdso mapping (0 if not mapped). */
#define VDSO_CURRENT_BASE ((unsigned long)current->mm->context.vdso)

/* Address of __kernel_vsyscall inside the mapped 32-bit vdso. */
#define VDSO_ENTRY \
	((unsigned long)current->mm->context.vdso + \
	 vdso_image_32.sym___kernel_vsyscall)

struct linux_binprm;

#define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1
extern int arch_setup_additional_pages(struct linux_binprm *bprm,
				       int uses_interp);
extern int compat_arch_setup_additional_pages(struct linux_binprm *bprm,
					      int uses_interp, bool x32);
/* x32 binaries carry EM_X86_64; pass that distinction through. */
#define COMPAT_ARCH_SETUP_ADDITIONAL_PAGES(bprm, ex, interpreter) \
	compat_arch_setup_additional_pages(bprm, interpreter, \
					   (ex->e_machine == EM_X86_64))
3783316ec8cSGabriel Krisman Bertazi
extern bool arch_syscall_is_vdso_sigreturn(struct pt_regs *regs);

/* Do not change the values. See get_align_mask() */
enum align_flags {
	ALIGN_VA_32 = BIT(0),	/* apply va alignment to 32-bit tasks */
	ALIGN_VA_64 = BIT(1),	/* apply va alignment to 64-bit tasks */
};

/*
 * mmap virtual-address alignment policy.  Cacheline-aligned, presumably
 * because it is read on hot mmap paths -- TODO confirm against users.
 */
struct va_alignment {
	int flags;		/* ALIGN_VA_* bits selecting affected tasks */
	unsigned long mask;
	unsigned long bits;
} ____cacheline_aligned;

extern struct va_alignment va_align;
extern unsigned long align_vdso_addr(unsigned long);
395dfb09f9bSBorislav Petkov #endif /* _ASM_X86_ELF_H */
396dfb09f9bSBorislav Petkov