/* SPDX-License-Identifier: LGPL-2.1 OR MIT */
/*
 * AARCH64 specific definitions for NOLIBC
 * Copyright (C) 2017-2022 Willy Tarreau <w@1wt.eu>
 */

#ifndef _NOLIBC_ARCH_AARCH64_H
#define _NOLIBC_ARCH_AARCH64_H

#include "compiler.h"

/* The struct returned by the newfstatat() syscall. Differs slightly from
 * x86_64's stat struct in field ordering, so be careful.
 */
struct sys_stat_struct {
	unsigned long st_dev;
	unsigned long st_ino;
	unsigned int  st_mode;
	unsigned int  st_nlink;
	unsigned int  st_uid;
	unsigned int  st_gid;

	unsigned long st_rdev;
	unsigned long __pad1;
	long          st_size;
	int           st_blksize;
	int           __pad2;

	long          st_blocks;
	long          st_atime;
	unsigned long st_atime_nsec;
	long          st_mtime;

	unsigned long st_mtime_nsec;
	long          st_ctime;
	unsigned long st_ctime_nsec;
	unsigned int  __unused[2];
};

/* Syscalls for AARCH64:
 *   - registers are 64-bit
 *   - stack is 16-byte aligned
 *   - syscall number is passed in x8
 *   - arguments are in x0, x1, x2, x3, x4, x5
 *   - the system call is performed by calling "svc #0"
 *   - syscall return comes in x0
 *   - the arguments are cast to long and assigned into the target registers
 *     which are then simply passed as registers to the asm code, so that we
 *     don't run into register constraint issues.
 *
 * On aarch64, select() is not implemented so we have to use pselect6().
 */
#define __ARCH_WANT_SYS_PSELECT6

#define my_syscall0(num) \
({ \
	register long _num  __asm__ ("x8") = (num); \
	register long _arg1 __asm__ ("x0"); \
	\
	__asm__ volatile ( \
		"svc #0\n" \
		: "=r"(_arg1) \
		: "r"(_num) \
		: "memory", "cc" \
	); \
	_arg1; \
})

#define my_syscall1(num, arg1) \
({ \
	register long _num  __asm__ ("x8") = (num); \
	register long _arg1 __asm__ ("x0") = (long)(arg1); \
	\
	__asm__ volatile ( \
		"svc #0\n" \
		: "=r"(_arg1) \
		: "r"(_arg1), \
		  "r"(_num) \
		: "memory", "cc" \
	); \
	_arg1; \
})

#define my_syscall2(num, arg1, arg2) \
({ \
	register long _num  __asm__ ("x8") = (num); \
	register long _arg1 __asm__ ("x0") = (long)(arg1); \
	register long _arg2 __asm__ ("x1") = (long)(arg2); \
	\
	__asm__ volatile ( \
		"svc #0\n" \
		: "=r"(_arg1) \
		: "r"(_arg1), "r"(_arg2), \
		  "r"(_num) \
		: "memory", "cc" \
	); \
	_arg1; \
})

#define my_syscall3(num, arg1, arg2, arg3) \
({ \
	register long _num  __asm__ ("x8") = (num); \
	register long _arg1 __asm__ ("x0") = (long)(arg1); \
	register long _arg2 __asm__ ("x1") = (long)(arg2); \
	register long _arg3 __asm__ ("x2") = (long)(arg3); \
	\
	__asm__ volatile ( \
		"svc #0\n" \
		: "=r"(_arg1) \
		: "r"(_arg1), "r"(_arg2), "r"(_arg3), \
		  "r"(_num) \
		: "memory", "cc" \
	); \
	_arg1; \
})

#define my_syscall4(num, arg1, arg2, arg3, arg4) \
({ \
	register long _num  __asm__ ("x8") = (num); \
	register long _arg1 __asm__ ("x0") = (long)(arg1); \
	register long _arg2 __asm__ ("x1") = (long)(arg2); \
	register long _arg3 __asm__ ("x2") = (long)(arg3); \
	register long _arg4 __asm__ ("x3") = (long)(arg4); \
	\
	__asm__ volatile ( \
		"svc #0\n" \
		: "=r"(_arg1) \
		: "r"(_arg1), "r"(_arg2), "r"(_arg3), "r"(_arg4), \
		  "r"(_num) \
		: "memory", "cc" \
	); \
	_arg1; \
})

#define my_syscall5(num, arg1, arg2, arg3, arg4, arg5) \
({ \
	register long _num  __asm__ ("x8") = (num); \
	register long _arg1 __asm__ ("x0") = (long)(arg1); \
	register long _arg2 __asm__ ("x1") = (long)(arg2); \
	register long _arg3 __asm__ ("x2") = (long)(arg3); \
	register long _arg4 __asm__ ("x3") = (long)(arg4); \
	register long _arg5 __asm__ ("x4") = (long)(arg5); \
	\
	__asm__ volatile ( \
		"svc #0\n" \
		: "=r"(_arg1) \
		: "r"(_arg1), "r"(_arg2), "r"(_arg3), "r"(_arg4), "r"(_arg5), \
		  "r"(_num) \
		: "memory", "cc" \
	); \
	_arg1; \
})

#define my_syscall6(num, arg1, arg2, arg3, arg4, arg5, arg6) \
({ \
	register long _num  __asm__ ("x8") = (num); \
	register long _arg1 __asm__ ("x0") = (long)(arg1); \
	register long _arg2 __asm__ ("x1") = (long)(arg2); \
	register long _arg3 __asm__ ("x2") = (long)(arg3); \
	register long _arg4 __asm__ ("x3") = (long)(arg4); \
	register long _arg5 __asm__ ("x4") = (long)(arg5); \
	register long _arg6 __asm__ ("x5") = (long)(arg6); \
	\
	__asm__ volatile ( \
		"svc #0\n" \
		: "=r"(_arg1) \
		: "r"(_arg1), "r"(_arg2), "r"(_arg3), "r"(_arg4), "r"(_arg5), \
		  "r"(_arg6), "r"(_num) \
		: "memory", "cc" \
	); \
	_arg1; \
})
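
/*
 * Usage sketch: higher nolibc layers typically wrap these macros into typed
 * syscall stubs, along the lines below. This assumes the style of nolibc's
 * sys.h and that __NR_write from <asm/unistd.h> is in scope; neither is
 * included by this header itself, hence the example stays a comment:
 *
 *	static __attribute__((unused))
 *	ssize_t sys_write(int fd, const void *buf, size_t count)
 *	{
 *		return my_syscall3(__NR_write, fd, buf, count);
 *	}
 *
 * On failure the kernel returns -errno in x0, so a small negative return
 * value signals an error which the caller must convert into errno.
 */
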
char **environ __attribute__((weak));
const unsigned long *_auxv __attribute__((weak));

/* startup code */
void __attribute__((weak,noreturn,optimize("omit-frame-pointer"))) __no_stack_protector _start(void)
{
	__asm__ volatile (
#ifdef _NOLIBC_STACKPROTECTOR
		"bl __stack_chk_init\n"          /* initialize stack protector */
#endif
		"ldr x0, [sp]\n"                 /* argc (x0) is at the top of the stack */
		"add x1, sp, 8\n"                /* argv (x1) = sp + 8 */
		"lsl x2, x0, 3\n"                /* envp (x2) = 8*argc ... */
		"add x2, x2, 8\n"                /*           + 8 (skip argv's NULL) */
		"add x2, x2, x1\n"               /*           + argv */
		"adrp x3, environ\n"             /* x3 = &environ (high bits) */
		"str x2, [x3, #:lo12:environ]\n" /* store envp into environ */
		"mov x4, x2\n"                   /* search for auxv (follows NULL after last env) */
		"0:\n"
		"ldr x5, [x4], 8\n"              /* x5 = *x4; x4 += 8 */
		"cbnz x5, 0b\n"                  /* and stop at the NULL after the last env */
		"adrp x3, _auxv\n"               /* x3 = &_auxv (high bits) */
		"str x4, [x3, #:lo12:_auxv]\n"   /* store x4 into _auxv */
		"and sp, x1, -16\n"              /* sp must be 16-byte aligned in the callee */
		"bl main\n"                      /* main() returns the status code, we exit with it */
		"mov x8, 93\n"                   /* NR_exit == 93 */
		"svc #0\n"
	);
	__builtin_unreachable();
}
#endif /* _NOLIBC_ARCH_AARCH64_H */
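
/*
 * For reference, the initial stack layout that _start above walks, as set up
 * by the Linux ELF loader on aarch64 (a sketch of the entry ABI, offsets in
 * bytes):
 *
 *	sp +  0         argc
 *	sp +  8         argv[0] .. argv[argc - 1], then NULL
 *	(right after)   envp[0] .. envp[n - 1], then NULL
 *	(right after)   auxv: (AT_xxx, value) pairs, terminated by AT_NULL
 *
 * This is why envp is computed as argv + 8 * argc + 8, and why _auxv is
 * located by scanning one 8-byte word at a time past envp's trailing NULL.
 */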