/* SPDX-License-Identifier: LGPL-2.1 OR MIT */
/*
 * PowerPC specific definitions for NOLIBC
 * Copyright (C) 2023 Zhangjin Wu <falcon@tinylab.org>
 */

#ifndef _NOLIBC_ARCH_POWERPC_H
#define _NOLIBC_ARCH_POWERPC_H

#include "compiler.h"
#include "crt.h"

/* Syscalls for PowerPC:
 *   - stack is 16-byte aligned
 *   - syscall number is passed in r0
 *   - arguments are in r3, r4, r5, r6, r7, r8, r9
 *   - the system call is performed by calling "sc"
 *   - syscall return comes in r3, and the summary overflow bit is checked
 *     to know if an error occurred, in which case the errno value is in r3.
 *   - the arguments are cast to long and assigned into the target
 *     registers, which are then simply passed as registers to the asm code,
 *     so that we don't run into issues with register constraints.
 *   Usage sketches follow the my_syscall*() definitions below.
 */

#define _NOLIBC_SYSCALL_CLOBBERLIST \
	"memory", "cr0", "r12", "r11", "r10", "r9"

#define my_syscall0(num) \
({ \
	register long _ret  __asm__ ("r3"); \
	register long _num  __asm__ ("r0") = (num); \
	\
	__asm__ volatile ( \
		" sc\n" \
		" bns+ 1f\n" \
		" neg %0, %0\n" \
		"1:\n" \
		: "=r"(_ret), "+r"(_num) \
		: \
		: _NOLIBC_SYSCALL_CLOBBERLIST, "r8", "r7", "r6", "r5", "r4" \
	); \
	_ret; \
})
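
/* Illustrative sketch, not part of the upstream header: a minimal getpid()
 * wrapper built on my_syscall0(). The example_getpid name is hypothetical,
 * and the #ifdef guard only assumes __NR_getpid has been provided by the
 * kernel's <asm/unistd.h> (normally pulled in elsewhere by nolibc).
 */
#ifdef __NR_getpid
static inline long example_getpid(void)
{
	/* r0 = __NR_getpid, the result (or -errno) comes back in r3 */
	return my_syscall0(__NR_getpid);
}
#endif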

#define my_syscall1(num, arg1) \
({ \
	register long _ret  __asm__ ("r3"); \
	register long _num  __asm__ ("r0") = (num); \
	register long _arg1 __asm__ ("r3") = (long)(arg1); \
	\
	__asm__ volatile ( \
		" sc\n" \
		" bns+ 1f\n" \
		" neg %0, %0\n" \
		"1:\n" \
		: "=r"(_ret), "+r"(_num) \
		: "0"(_arg1) \
		: _NOLIBC_SYSCALL_CLOBBERLIST, "r8", "r7", "r6", "r5", "r4" \
	); \
	_ret; \
})

#define my_syscall2(num, arg1, arg2) \
({ \
	register long _ret  __asm__ ("r3"); \
	register long _num  __asm__ ("r0") = (num); \
	register long _arg1 __asm__ ("r3") = (long)(arg1); \
	register long _arg2 __asm__ ("r4") = (long)(arg2); \
	\
	__asm__ volatile ( \
		" sc\n" \
		" bns+ 1f\n" \
		" neg %0, %0\n" \
		"1:\n" \
		: "=r"(_ret), "+r"(_num), "+r"(_arg2) \
		: "0"(_arg1) \
		: _NOLIBC_SYSCALL_CLOBBERLIST, "r8", "r7", "r6", "r5" \
	); \
	_ret; \
})

#define my_syscall3(num, arg1, arg2, arg3) \
({ \
	register long _ret  __asm__ ("r3"); \
	register long _num  __asm__ ("r0") = (num); \
	register long _arg1 __asm__ ("r3") = (long)(arg1); \
	register long _arg2 __asm__ ("r4") = (long)(arg2); \
	register long _arg3 __asm__ ("r5") = (long)(arg3); \
	\
	__asm__ volatile ( \
		" sc\n" \
		" bns+ 1f\n" \
		" neg %0, %0\n" \
		"1:\n" \
		: "=r"(_ret), "+r"(_num), "+r"(_arg2), "+r"(_arg3) \
		: "0"(_arg1) \
		: _NOLIBC_SYSCALL_CLOBBERLIST, "r8", "r7", "r6" \
	); \
	_ret; \
})

#define my_syscall4(num, arg1, arg2, arg3, arg4) \
({ \
	register long _ret  __asm__ ("r3"); \
	register long _num  __asm__ ("r0") = (num); \
	register long _arg1 __asm__ ("r3") = (long)(arg1); \
	register long _arg2 __asm__ ("r4") = (long)(arg2); \
	register long _arg3 __asm__ ("r5") = (long)(arg3); \
	register long _arg4 __asm__ ("r6") = (long)(arg4); \
	\
	__asm__ volatile ( \
		" sc\n" \
		" bns+ 1f\n" \
		" neg %0, %0\n" \
		"1:\n" \
		: "=r"(_ret), "+r"(_num), "+r"(_arg2), "+r"(_arg3), \
		  "+r"(_arg4) \
		: "0"(_arg1) \
		: _NOLIBC_SYSCALL_CLOBBERLIST, "r8", "r7" \
	); \
	_ret; \
})

#define my_syscall5(num, arg1, arg2, arg3, arg4, arg5) \
({ \
	register long _ret  __asm__ ("r3"); \
	register long _num  __asm__ ("r0") = (num); \
	register long _arg1 __asm__ ("r3") = (long)(arg1); \
	register long _arg2 __asm__ ("r4") = (long)(arg2); \
	register long _arg3 __asm__ ("r5") = (long)(arg3); \
	register long _arg4 __asm__ ("r6") = (long)(arg4); \
	register long _arg5 __asm__ ("r7") = (long)(arg5); \
	\
	__asm__ volatile ( \
		" sc\n" \
		" bns+ 1f\n" \
		" neg %0, %0\n" \
		"1:\n" \
		: "=r"(_ret), "+r"(_num), "+r"(_arg2), "+r"(_arg3), \
		  "+r"(_arg4), "+r"(_arg5) \
		: "0"(_arg1) \
		: _NOLIBC_SYSCALL_CLOBBERLIST, "r8" \
	); \
	_ret; \
})

#define my_syscall6(num, arg1, arg2, arg3, arg4, arg5, arg6) \
({ \
	register long _ret  __asm__ ("r3"); \
	register long _num  __asm__ ("r0") = (num); \
	register long _arg1 __asm__ ("r3") = (long)(arg1); \
	register long _arg2 __asm__ ("r4") = (long)(arg2); \
	register long _arg3 __asm__ ("r5") = (long)(arg3); \
	register long _arg4 __asm__ ("r6") = (long)(arg4); \
	register long _arg5 __asm__ ("r7") = (long)(arg5); \
	register long _arg6 __asm__ ("r8") = (long)(arg6); \
	\
	__asm__ volatile ( \
		" sc\n" \
		" bns+ 1f\n" \
		" neg %0, %0\n" \
		"1:\n" \
		: "=r"(_ret), "+r"(_num), "+r"(_arg2), "+r"(_arg3), \
		  "+r"(_arg4), "+r"(_arg5), "+r"(_arg6) \
		: "0"(_arg1) \
		: _NOLIBC_SYSCALL_CLOBBERLIST \
	); \
	_ret; \
})
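
/* Illustrative sketch of the error convention described at the top of this
 * file: the macros return -errno on failure, so a POSIX-style wrapper can
 * turn that into errno plus a -1 return. example_write() and its errno
 * handling are assumptions for this sketch, not nolibc's actual wrappers
 * (those live in sys.h).
 */
#ifdef __NR_write
static inline long example_write(int fd, const void *buf, unsigned long count)
{
	/* fd/buf/count land in r3/r4/r5, __NR_write goes in r0 */
	long ret = my_syscall3(__NR_write, fd, buf, count);

	if (ret < 0) {
		errno = -ret;	/* assumes an errno definition is in scope */
		return -1;
	}
	return ret;
}
#endif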

#if !defined(__powerpc64__) && !defined(__clang__)
/* FIXME: for 32-bit PowerPC with newer gcc compilers (e.g. gcc 13.1.0),
 * "omit-frame-pointer" fails with __attribute__((no_stack_protector)) but
 * works with __attribute__((__optimize__("-fno-stack-protector"))).
 */
#ifdef __no_stack_protector
#undef __no_stack_protector
#define __no_stack_protector __attribute__((__optimize__("-fno-stack-protector")))
#endif
#endif /* !__powerpc64__ && !__clang__ */

/* startup code */
void __attribute__((weak, noreturn, optimize("Os", "omit-frame-pointer"))) __no_stack_protector _start(void)
{
#ifdef __powerpc64__
#if _CALL_ELF == 2
	/* with -mabi=elfv2, save the TOC/GOT pointer to r2;
	 * r12 holds the global entry point address, so the TOC is computed from it
	 * https://www.llvm.org/devmtg/2014-04/PDFs/Talks/Euro-LLVM-2014-Weigand.pdf
	 * https://refspecs.linuxfoundation.org/ELF/ppc64/PPC-elf64abi.pdf
	 */
	__asm__ volatile (
		"addis  2, 12, .TOC. - _start@ha\n"
		"addi   2,  2, .TOC. - _start@l\n"
	);
#endif /* _CALL_ELF == 2 */

	__asm__ volatile (
		"mr     3, 1\n"      /* save stack pointer to r3, as arg1 of _start_c */
		"clrrdi 1, 1, 4\n"   /* align the stack to 16 bytes                   */
		"li     0, 0\n"      /* zero the frame pointer                        */
		"stdu   1, -32(1)\n" /* the initial stack frame                       */
		"bl     _start_c\n"  /* transfer to the C runtime                     */
	);
#else
	__asm__ volatile (
		"mr     3, 1\n"      /* save stack pointer to r3, as arg1 of _start_c */
		"clrrwi 1, 1, 4\n"   /* align the stack to 16 bytes                   */
		"li     0, 0\n"      /* zero the frame pointer                        */
		"stwu   1, -16(1)\n" /* the initial stack frame                       */
		"bl     _start_c\n"  /* transfer to the C runtime                     */
	);
#endif
	__builtin_unreachable();
}
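
/* For context: _start above passes the original stack pointer to _start_c()
 * from crt.h, which unpacks the kernel-provided stack layout roughly as
 * sketched below (a hedged recap, not a redefinition) before calling main():
 *
 *	void _start_c(long *sp)
 *	{
 *		long argc   = *sp;               // word 0: argument count
 *		char **argv = (void *)(sp + 1);  // then argv[], NULL-terminated
 *		char **envp = argv + argc + 1;   // then envp[], NULL-terminated
 *		...
 *	}
 */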

#endif /* _NOLIBC_ARCH_POWERPC_H */