/*
 *  S390 version
 *
 *  Derived from "include/asm-i386/elf.h"
 */

#ifndef __ASMS390_ELF_H
#define __ASMS390_ELF_H

/* s390 relocations defined by the ABIs */
#define R_390_NONE		0	/* No reloc. */
#define R_390_8			1	/* Direct 8 bit. */
#define R_390_12		2	/* Direct 12 bit. */
#define R_390_16		3	/* Direct 16 bit. */
#define R_390_32		4	/* Direct 32 bit. */
#define R_390_PC32		5	/* PC relative 32 bit. */
#define R_390_GOT12		6	/* 12 bit GOT offset. */
#define R_390_GOT32		7	/* 32 bit GOT offset. */
#define R_390_PLT32		8	/* 32 bit PC relative PLT address. */
#define R_390_COPY		9	/* Copy symbol at runtime. */
#define R_390_GLOB_DAT		10	/* Create GOT entry. */
#define R_390_JMP_SLOT		11	/* Create PLT entry. */
#define R_390_RELATIVE		12	/* Adjust by program base. */
#define R_390_GOTOFF32		13	/* 32 bit offset to GOT. */
#define R_390_GOTPC		14	/* 32 bit PC rel. offset to GOT. */
#define R_390_GOT16		15	/* 16 bit GOT offset. */
#define R_390_PC16		16	/* PC relative 16 bit. */
#define R_390_PC16DBL		17	/* PC relative 16 bit shifted by 1. */
#define R_390_PLT16DBL		18	/* 16 bit PC rel. PLT shifted by 1. */
#define R_390_PC32DBL		19	/* PC relative 32 bit shifted by 1. */
#define R_390_PLT32DBL		20	/* 32 bit PC rel. PLT shifted by 1. */
#define R_390_GOTPCDBL		21	/* 32 bit PC rel. GOT shifted by 1. */
#define R_390_64		22	/* Direct 64 bit. */
#define R_390_PC64		23	/* PC relative 64 bit. */
#define R_390_GOT64		24	/* 64 bit GOT offset. */
#define R_390_PLT64		25	/* 64 bit PC relative PLT address. */
#define R_390_GOTENT		26	/* 32 bit PC rel. to GOT entry >> 1. */
#define R_390_GOTOFF16		27	/* 16 bit offset to GOT. */
#define R_390_GOTOFF64		28	/* 64 bit offset to GOT. */
#define R_390_GOTPLT12		29	/* 12 bit offset to jump slot. */
#define R_390_GOTPLT16		30	/* 16 bit offset to jump slot. */
#define R_390_GOTPLT32		31	/* 32 bit offset to jump slot. */
#define R_390_GOTPLT64		32	/* 64 bit offset to jump slot. */
#define R_390_GOTPLTENT		33	/* 32 bit rel. offset to jump slot. */
#define R_390_PLTOFF16		34	/* 16 bit offset from GOT to PLT. */
#define R_390_PLTOFF32		35	/* 32 bit offset from GOT to PLT. */
#define R_390_PLTOFF64		36	/* 64 bit offset from GOT to PLT. */
#define R_390_TLS_LOAD		37	/* Tag for load insn in TLS code. */
#define R_390_TLS_GDCALL	38	/* Tag for function call in general
					   dynamic TLS code. */
#define R_390_TLS_LDCALL	39	/* Tag for function call in local
					   dynamic TLS code. */
#define R_390_TLS_GD32		40	/* Direct 32 bit for general dynamic
					   thread local data. */
#define R_390_TLS_GD64		41	/* Direct 64 bit for general dynamic
					   thread local data. */
#define R_390_TLS_GOTIE12	42	/* 12 bit GOT offset for static TLS
					   block offset. */
#define R_390_TLS_GOTIE32	43	/* 32 bit GOT offset for static TLS
					   block offset. */
#define R_390_TLS_GOTIE64	44	/* 64 bit GOT offset for static TLS
					   block offset. */
#define R_390_TLS_LDM32		45	/* Direct 32 bit for local dynamic
					   thread local data in LD code. */
#define R_390_TLS_LDM64		46	/* Direct 64 bit for local dynamic
					   thread local data in LD code. */
#define R_390_TLS_IE32		47	/* 32 bit address of GOT entry for
					   negated static TLS block offset. */
#define R_390_TLS_IE64		48	/* 64 bit address of GOT entry for
					   negated static TLS block offset. */
#define R_390_TLS_IEENT		49	/* 32 bit rel. offset to GOT entry for
					   negated static TLS block offset. */
#define R_390_TLS_LE32		50	/* 32 bit negated offset relative to
					   static TLS block. */
#define R_390_TLS_LE64		51	/* 64 bit negated offset relative to
					   static TLS block. */
#define R_390_TLS_LDO32		52	/* 32 bit offset relative to TLS
					   block. */
#define R_390_TLS_LDO64		53	/* 64 bit offset relative to TLS
					   block. */
#define R_390_TLS_DTPMOD	54	/* ID of module containing symbol. */
#define R_390_TLS_DTPOFF	55	/* Offset in TLS block. */
#define R_390_TLS_TPOFF		56	/* Negated offset in static TLS
					   block. */
#define R_390_20		57	/* Direct 20 bit. */
#define R_390_GOT20		58	/* 20 bit GOT offset. */
#define R_390_GOTPLT20		59	/* 20 bit offset to jump slot. */
#define R_390_TLS_GOTIE20	60	/* 20 bit GOT offset for static TLS
					   block offset. */
/* Keep this the last entry. */
#define R_390_NUM	61
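
/*
 * Illustrative sketch only (not taken from the kernel's module loader):
 * roughly how one of the PC-relative "DBL" relocations above is applied.
 * The stored value is the halfword distance, i.e. the byte distance
 * shifted right by one.  The helper name and its arguments are
 * hypothetical.
 *
 *	static void apply_r390_pc32dbl(void *loc, unsigned long val)
 *	{
 *		// R_390_PC32DBL: PC relative 32 bit, shifted by 1.
 *		*(int *)loc = (int)((val - (unsigned long)loc) >> 1);
 *	}
 */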

/* Bits present in AT_HWCAP. */
#define HWCAP_S390_ESAN3	1
#define HWCAP_S390_ZARCH	2
#define HWCAP_S390_STFLE	4
#define HWCAP_S390_MSA		8
#define HWCAP_S390_LDISP	16
#define HWCAP_S390_EIMM		32
#define HWCAP_S390_DFP		64
#define HWCAP_S390_HPAGE	128
#define HWCAP_S390_ETF3EH	256
#define HWCAP_S390_HIGH_GPRS	512
#define HWCAP_S390_TE		1024
#define HWCAP_S390_VXRS		2048
#define HWCAP_S390_VXRS_BCD	4096
#define HWCAP_S390_VXRS_EXT	8192
#define HWCAP_S390_GS		16384

/* Internal bits, not exposed via elf */
#define HWCAP_INT_SIE		1UL
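
/*
 * Illustrative sketch only (user space view, not part of the kernel build):
 * the HWCAP_S390_* bits above are visible to programs through the auxiliary
 * vector, e.g. with glibc's getauxval(3):
 *
 *	#include <sys/auxv.h>
 *
 *	unsigned long hwcap = getauxval(AT_HWCAP);
 *	if (hwcap & HWCAP_S390_VXRS)
 *		;	// vector facility present, pick the vector code path
 */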

/*
 * These are used to set parameters in the core dumps.
 */
#define ELF_CLASS	ELFCLASS64
#define ELF_DATA	ELFDATA2MSB
#define ELF_ARCH	EM_S390

/* s390 specific phdr types */
#define PT_S390_PGSTE	0x70000000

/*
 * ELF register definitions..
 */

#include <asm/ptrace.h>
#include <asm/compat.h>
#include <asm/syscall.h>
#include <asm/user.h>

typedef s390_fp_regs elf_fpregset_t;
typedef s390_regs elf_gregset_t;

typedef s390_fp_regs compat_elf_fpregset_t;
typedef s390_compat_regs compat_elf_gregset_t;

#include <linux/compat.h>
#include <linux/sched/mm.h>	/* for task_struct */
#include <asm/mmu_context.h>

#include <asm/vdso.h>

extern unsigned int vdso_enabled;

/*
 * This is used to ensure we don't load something for the wrong architecture.
 */
#define elf_check_arch(x) \
	(((x)->e_machine == EM_S390 || (x)->e_machine == EM_S390_OLD) \
	 && (x)->e_ident[EI_CLASS] == ELF_CLASS)
#define compat_elf_check_arch(x) \
	(((x)->e_machine == EM_S390 || (x)->e_machine == EM_S390_OLD) \
	 && (x)->e_ident[EI_CLASS] == ELF_CLASS)
#define compat_start_thread	start_thread31

struct arch_elf_state {
	int rc;
};

#define INIT_ARCH_ELF_STATE { .rc = 0 }

#define arch_check_elf(ehdr, interp, interp_ehdr, state) (0)
/*
 * An ELF binary that carries a PT_S390_PGSTE program header asks for page
 * tables with PGSTEs.  If the current mm was not set up that way, flag the
 * task and restart the system call so that execve() builds a fresh mm with
 * PGSTE-enabled page tables.
 */
#ifdef CONFIG_PGSTE
#define arch_elf_pt_proc(ehdr, phdr, elf, interp, state)	\
({								\
	struct arch_elf_state *_state = state;			\
	if ((phdr)->p_type == PT_S390_PGSTE &&			\
	    !page_table_allocate_pgste &&			\
	    !test_thread_flag(TIF_PGSTE) &&			\
	    !current->mm->context.alloc_pgste) {		\
		set_thread_flag(TIF_PGSTE);			\
		set_pt_regs_flag(task_pt_regs(current),		\
				 PIF_SYSCALL_RESTART);		\
		_state->rc = -EAGAIN;				\
	}							\
	_state->rc;						\
})
#else
#define arch_elf_pt_proc(ehdr, phdr, elf, interp, state)	\
({								\
	(state)->rc;						\
})
#endif

/* For SVR4/S390 the function pointer to be registered with `atexit` is
   passed in R14. */

#define ELF_PLAT_INIT(_r, load_addr) \
	do { \
		_r->gprs[14] = 0; \
	} while (0)

#define CORE_DUMP_USE_REGSET
#define ELF_EXEC_PAGESIZE	4096

/*
 * This is the base location for PIE (ET_DYN with INTERP) loads. On
 * 64-bit, this is raised to 4GB to leave the entire 32-bit address
 * space open for things that want to use the area for 32-bit pointers.
 */
#define ELF_ET_DYN_BASE		(is_compat_task() ? 0x000400000UL : \
						    0x100000000UL)

/* This yields a mask that user programs can use to figure out what
   instruction set this CPU supports. */

extern unsigned long elf_hwcap;
#define ELF_HWCAP (elf_hwcap)

/* Internal hardware capabilities, not exposed via elf */

extern unsigned long int_hwcap;

/* This yields a string that ld.so will use to load implementation
   specific libraries for optimization.  This is more specific in
   intent than poking at uname or /proc/cpuinfo.

   For the moment, we only have optimizations for particular s390
   machine generations, but that could change... */

#define ELF_PLATFORM_SIZE 8
extern char elf_platform[];
#define ELF_PLATFORM (elf_platform)

#ifndef CONFIG_COMPAT
#define SET_PERSONALITY(ex)					\
do {								\
	set_personality(PER_LINUX |				\
		(current->personality & (~PER_MASK)));		\
	current->thread.sys_call_table =			\
		(unsigned long) &sys_call_table;		\
} while (0)
#else /* CONFIG_COMPAT */
#define SET_PERSONALITY(ex)					\
do {								\
	if (personality(current->personality) != PER_LINUX32)	\
		set_personality(PER_LINUX |			\
			(current->personality & ~PER_MASK));	\
	if ((ex).e_ident[EI_CLASS] == ELFCLASS32) {		\
		set_thread_flag(TIF_31BIT);			\
		current->thread.sys_call_table =		\
			(unsigned long) &sys_call_table_emu;	\
	} else {						\
		clear_thread_flag(TIF_31BIT);			\
		current->thread.sys_call_table =		\
			(unsigned long) &sys_call_table;	\
	}							\
} while (0)
#endif /* CONFIG_COMPAT */

/*
 * Cache aliasing on the latest machines calls for a mapping granularity
 * of 512KB. For 64-bit processes use a 512KB alignment and a randomization
 * of up to 1GB. For 31-bit processes the virtual address space is limited,
 * use no alignment and limit the randomization to 8MB.
 * The masks below are in units of 4KB pages: 0x3ff80 with the low 7 bits
 * clear yields 512KB-aligned offsets of up to roughly 1GB, while 0x7ff
 * yields offsets of up to roughly 8MB.
 */
#define BRK_RND_MASK	(is_compat_task() ? 0x7ffUL : 0x3ffffUL)
#define MMAP_RND_MASK	(is_compat_task() ? 0x7ffUL : 0x3ff80UL)
#define MMAP_ALIGN_MASK	(is_compat_task() ? 0 : 0x7fUL)
#define STACK_RND_MASK	MMAP_RND_MASK

/* update AT_VECTOR_SIZE_ARCH if the number of NEW_AUX_ENT entries changes */
#define ARCH_DLINFO							    \
do {									    \
	if (vdso_enabled)						    \
		NEW_AUX_ENT(AT_SYSINFO_EHDR,				    \
			    (unsigned long)current->mm->context.vdso_base); \
} while (0)

struct linux_binprm;

#define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1
int arch_setup_additional_pages(struct linux_binprm *, int);

#endif
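
/*
 * Illustrative sketch only (user space view, not part of this header):
 * the AT_SYSINFO_EHDR entry emitted by ARCH_DLINFO above is how the
 * dynamic linker locates the vdso, e.g.
 *
 *	#include <sys/auxv.h>
 *
 *	// Address of the vdso's ELF header, 0 if the vdso is disabled.
 *	unsigned long vdso = getauxval(AT_SYSINFO_EHDR);
 */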