/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_ATOMIC64_32_H
#define _ASM_X86_ATOMIC64_32_H

#include <linux/compiler.h>
#include <linux/types.h>
//#include <asm/cmpxchg.h>

/* A 64-bit atomic type */

typedef struct {
	s64 __aligned(8) counter;
} atomic64_t;

#define ATOMIC64_INIT(val)	{ (val) }
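
/*
 * Illustrative usage, not part of this header. The __aligned(8) above
 * keeps the counter naturally aligned so a cmpxchg8b access never
 * straddles a cache line:
 *
 *	static atomic64_t nr_events = ATOMIC64_INIT(0);
 *
 *	arch_atomic64_inc(&nr_events);
 *	pr_info("events: %lld\n", arch_atomic64_read(&nr_events));
 */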

#define __ATOMIC64_DECL(sym) void atomic64_##sym(atomic64_t *, ...)
#ifndef ATOMIC64_EXPORT
#define ATOMIC64_DECL_ONE __ATOMIC64_DECL
#else
#define ATOMIC64_DECL_ONE(sym) __ATOMIC64_DECL(sym); \
	ATOMIC64_EXPORT(atomic64_##sym)
#endif
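
/*
 * Rough expansion sketch: when the out-of-line implementation defines
 * ATOMIC64_EXPORT(sym) as EXPORT_SYMBOL(sym) before including this
 * header, ATOMIC64_DECL_ONE(add_return_cx8) becomes approximately
 *
 *	void atomic64_add_return_cx8(atomic64_t *, ...);
 *	EXPORT_SYMBOL(atomic64_add_return_cx8);
 *
 * The variadic prototype is deliberate: the helpers are written in
 * assembly with their own register calling convention, which no C
 * parameter list could describe.
 */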

#ifdef CONFIG_X86_CMPXCHG64
#define __alternative_atomic64(f, g, out, in...) \
	asm volatile("call %c[func]" \
		     : out : [func] "i" (atomic64_##g##_cx8), ## in)

#define ATOMIC64_DECL(sym) ATOMIC64_DECL_ONE(sym##_cx8)
#else
#define __alternative_atomic64(f, g, out, in...) \
	alternative_call(atomic64_##f##_386, atomic64_##g##_cx8, \
			 X86_FEATURE_CX8, ASM_OUTPUT2(out), ## in)

#define ATOMIC64_DECL(sym) ATOMIC64_DECL_ONE(sym##_cx8); \
			   ATOMIC64_DECL_ONE(sym##_386)

ATOMIC64_DECL_ONE(add_386);
ATOMIC64_DECL_ONE(sub_386);
ATOMIC64_DECL_ONE(inc_386);
ATOMIC64_DECL_ONE(dec_386);
#endif
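
/*
 * On kernels that must also boot on pre-Pentium CPUs (no cmpxchg8b,
 * CONFIG_X86_CMPXCHG64 unset), alternative_call() patches every call
 * site at boot: processors with X86_FEATURE_CX8 get the lock-free _cx8
 * helpers, older ones the generic _386 fallbacks. With
 * CONFIG_X86_CMPXCHG64 set, the _cx8 helper is called unconditionally
 * and the f argument of __alternative_atomic64() goes unused.
 */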

#define alternative_atomic64(f, out, in...) \
	__alternative_atomic64(f, f, ASM_OUTPUT2(out), ## in)

ATOMIC64_DECL(read);
ATOMIC64_DECL(set);
ATOMIC64_DECL(xchg);
ATOMIC64_DECL(add_return);
ATOMIC64_DECL(sub_return);
ATOMIC64_DECL(inc_return);
ATOMIC64_DECL(dec_return);
ATOMIC64_DECL(dec_if_positive);
ATOMIC64_DECL(inc_not_zero);
ATOMIC64_DECL(add_unless);

#undef ATOMIC64_DECL
#undef ATOMIC64_DECL_ONE
#undef __ATOMIC64_DECL
#undef ATOMIC64_EXPORT

static __always_inline s64 arch_atomic64_cmpxchg(atomic64_t *v, s64 o, s64 n)
{
	return arch_cmpxchg64(&v->counter, o, n);
}
#define arch_atomic64_cmpxchg arch_atomic64_cmpxchg
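
/*
 * arch_cmpxchg64() compiles to a lock cmpxchg8b: it stores n only if the
 * counter still holds o, and always returns the value it found. A sketch
 * of the usual retry idiom built on it (identifiers are illustrative):
 *
 *	s64 old = arch_atomic64_read(v), got;
 *
 *	while ((got = arch_atomic64_cmpxchg(v, old, old * 2)) != old)
 *		old = got;
 */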

static __always_inline s64 arch_atomic64_xchg(atomic64_t *v, s64 n)
{
	s64 o;
	unsigned high = (unsigned)(n >> 32);
	unsigned low = (unsigned)n;
	alternative_atomic64(xchg, "=&A" (o),
			     "S" (v), "b" (low), "c" (high)
			     : "memory");
	return o;
}
#define arch_atomic64_xchg arch_atomic64_xchg
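
/*
 * The constraints spell out the helpers' register protocol: "A" names
 * the EDX:EAX pair (the only 64-bit register constraint on 32-bit x86),
 * so the old value comes back there, while the new value is split across
 * ECX:EBX ("c"/"b") and the atomic64_t pointer travels in ESI ("S").
 */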

static __always_inline void arch_atomic64_set(atomic64_t *v, s64 i)
{
	unsigned high = (unsigned)(i >> 32);
	unsigned low = (unsigned)i;
	alternative_atomic64(set, /* no output */,
			     "S" (v), "b" (low), "c" (high)
			     : "eax", "edx", "memory");
}

static __always_inline s64 arch_atomic64_read(const atomic64_t *v)
{
	s64 r;
	alternative_atomic64(read, "=&A" (r), "c" (v) : "memory");
	return r;
}
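
/*
 * Even a plain read goes through the helper: 32-bit x86 has no ordinary
 * 64-bit load, so reading the two halves separately could observe a torn
 * value. The _cx8 variant instead issues a cmpxchg8b whose compare value
 * is a don't-care, which fetches all 64 bits atomically.
 */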

static __always_inline s64 arch_atomic64_add_return(s64 i, atomic64_t *v)
{
	alternative_atomic64(add_return,
			     ASM_OUTPUT2("+A" (i), "+c" (v)),
			     ASM_NO_INPUT_CLOBBER("memory"));
	return i;
}
#define arch_atomic64_add_return arch_atomic64_add_return
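
/*
 * Two helper macros make the constraint lists survive macro expansion:
 * ASM_OUTPUT2() merely regroups the comma-separated outputs into one
 * macro argument, and ASM_NO_INPUT_CLOBBER() passes the clobber list
 * through the input slot (prefixed by a dummy "i" (0) operand), since
 * alternative_atomic64() has no separate clobber parameter. "+A" binds i
 * to EDX:EAX both in and out, so the sum replaces the addend in place.
 */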

static __always_inline s64 arch_atomic64_sub_return(s64 i, atomic64_t *v)
{
	alternative_atomic64(sub_return,
			     ASM_OUTPUT2("+A" (i), "+c" (v)),
			     ASM_NO_INPUT_CLOBBER("memory"));
	return i;
}
#define arch_atomic64_sub_return arch_atomic64_sub_return

static __always_inline s64 arch_atomic64_inc_return(atomic64_t *v)
{
	s64 a;
	alternative_atomic64(inc_return, "=&A" (a),
			     "S" (v) : "memory", "ecx");
	return a;
}
#define arch_atomic64_inc_return arch_atomic64_inc_return

static __always_inline s64 arch_atomic64_dec_return(atomic64_t *v)
{
	s64 a;
	alternative_atomic64(dec_return, "=&A" (a),
			     "S" (v) : "memory", "ecx");
	return a;
}
#define arch_atomic64_dec_return arch_atomic64_dec_return

static __always_inline s64 arch_atomic64_add(s64 i, atomic64_t *v)
{
	__alternative_atomic64(add, add_return,
			       ASM_OUTPUT2("+A" (i), "+c" (v)),
			       ASM_NO_INPUT_CLOBBER("memory"));
	return i;
}

static __always_inline s64 arch_atomic64_sub(s64 i, atomic64_t *v)
{
	__alternative_atomic64(sub, sub_return,
			       ASM_OUTPUT2("+A" (i), "+c" (v)),
			       ASM_NO_INPUT_CLOBBER("memory"));
	return i;
}
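
/*
 * The two names passed to __alternative_atomic64() differ here because
 * the 386 fallback library has dedicated no-return add/sub helpers,
 * while the cx8 implementation only provides the *_return forms; pairing
 * atomic64_add_386 with atomic64_add_return_cx8 and ignoring the result
 * covers both cases.
 */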

static __always_inline void arch_atomic64_inc(atomic64_t *v)
{
	__alternative_atomic64(inc, inc_return, /* no output */,
			       "S" (v) : "memory", "eax", "ecx", "edx");
}
#define arch_atomic64_inc arch_atomic64_inc

static __always_inline void arch_atomic64_dec(atomic64_t *v)
{
	__alternative_atomic64(dec, dec_return, /* no output */,
			       "S" (v) : "memory", "eax", "ecx", "edx");
}
#define arch_atomic64_dec arch_atomic64_dec

static __always_inline int arch_atomic64_add_unless(atomic64_t *v, s64 a, s64 u)
{
	unsigned low = (unsigned)u;
	unsigned high = (unsigned)(u >> 32);
	alternative_atomic64(add_unless,
			     ASM_OUTPUT2("+A" (a), "+c" (low), "+D" (high)),
			     "S" (v) : "memory");
	return (int)a;
}
#define arch_atomic64_add_unless arch_atomic64_add_unless
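
/*
 * Semantics note: the value a is added to *v unless *v == u, and the
 * return value is nonzero iff the addition happened; the helper reuses
 * the EDX:EAX pair that carried a to report that result. A hedged usage
 * sketch (the refcount field name is illustrative):
 *
 *	if (arch_atomic64_add_unless(&obj->refs, 1, 0))
 *		... safely acquired a reference ...
 */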

static __always_inline int arch_atomic64_inc_not_zero(atomic64_t *v)
{
	int r;
	alternative_atomic64(inc_not_zero, "=&a" (r),
			     "S" (v) : "ecx", "edx", "memory");
	return r;
}
#define arch_atomic64_inc_not_zero arch_atomic64_inc_not_zero

static __always_inline s64 arch_atomic64_dec_if_positive(atomic64_t *v)
{
	s64 r;
	alternative_atomic64(dec_if_positive, "=&A" (r),
			     "S" (v) : "ecx", "memory");
	return r;
}
#define arch_atomic64_dec_if_positive arch_atomic64_dec_if_positive
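
/*
 * dec_if_positive only stores the decrement when the result stays
 * non-negative, but always returns the decremented value, so a negative
 * return tells the caller the counter was already at or below zero.
 */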

#undef alternative_atomic64
#undef __alternative_atomic64

static __always_inline void arch_atomic64_and(s64 i, atomic64_t *v)
{
	s64 old, c = 0;

	while ((old = arch_atomic64_cmpxchg(v, c, c & i)) != c)
		c = old;
}
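
/*
 * The bitwise and fetch_* operations below have no assembly helpers;
 * they are all built from the same compare-and-swap loop. Starting the
 * guess at c = 0 makes the first cmpxchg double as an atomic read: if
 * the counter is nonzero, the compare fails and hands back the current
 * value, seeding the second, usually successful, attempt.
 */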

static __always_inline s64 arch_atomic64_fetch_and(s64 i, atomic64_t *v)
{
	s64 old, c = 0;

	while ((old = arch_atomic64_cmpxchg(v, c, c & i)) != c)
		c = old;

	return old;
}
#define arch_atomic64_fetch_and arch_atomic64_fetch_and

static __always_inline void arch_atomic64_or(s64 i, atomic64_t *v)
{
	s64 old, c = 0;

	while ((old = arch_atomic64_cmpxchg(v, c, c | i)) != c)
		c = old;
}

static __always_inline s64 arch_atomic64_fetch_or(s64 i, atomic64_t *v)
{
	s64 old, c = 0;

	while ((old = arch_atomic64_cmpxchg(v, c, c | i)) != c)
		c = old;

	return old;
}
#define arch_atomic64_fetch_or arch_atomic64_fetch_or

static __always_inline void arch_atomic64_xor(s64 i, atomic64_t *v)
{
	s64 old, c = 0;

	while ((old = arch_atomic64_cmpxchg(v, c, c ^ i)) != c)
		c = old;
}

static __always_inline s64 arch_atomic64_fetch_xor(s64 i, atomic64_t *v)
{
	s64 old, c = 0;

	while ((old = arch_atomic64_cmpxchg(v, c, c ^ i)) != c)
		c = old;

	return old;
}
#define arch_atomic64_fetch_xor arch_atomic64_fetch_xor

static __always_inline s64 arch_atomic64_fetch_add(s64 i, atomic64_t *v)
{
	s64 old, c = 0;

	while ((old = arch_atomic64_cmpxchg(v, c, c + i)) != c)
		c = old;

	return old;
}
#define arch_atomic64_fetch_add arch_atomic64_fetch_add

#define arch_atomic64_fetch_sub(i, v)	arch_atomic64_fetch_add(-(i), (v))
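
/*
 * fetch_sub is just fetch_add of the negated argument; since the kernel
 * is built with -fno-strict-overflow, even -(S64_MIN) wraps predictably
 * and the subtraction stays correct modulo 2^64.
 */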

#endif /* _ASM_X86_ATOMIC64_32_H */