1b2441318SGreg Kroah-Hartman /* SPDX-License-Identifier: GPL-2.0 */
21965aae3SH. Peter Anvin #ifndef _ASM_X86_CMPXCHG_32_H
31965aae3SH. Peter Anvin #define _ASM_X86_CMPXCHG_32_H
4bb898558SAl Viro
5bb898558SAl Viro /*
68b9fd48eSRandy Dunlap * Note: if you use set64_bit(), __cmpxchg64(), or their variants,
7bb898558SAl Viro * you need to test for the feature in boot_cpu_data.
8bb898558SAl Viro */
9bb898558SAl Viro
#ifdef CONFIG_X86_CMPXCHG64
/*
 * When the kernel is built only for CPUs known to have CMPXCHG8B
 * (CONFIG_X86_CMPXCHG64), the arch_* entry points map directly onto the
 * inline-asm helpers below.  Operands are widened to unsigned long long
 * so the helpers always see a full 64-bit value.
 */
#define arch_cmpxchg64(ptr, o, n)					\
	((__typeof__(*(ptr)))__cmpxchg64((ptr), (unsigned long long)(o), \
					 (unsigned long long)(n)))
#define arch_cmpxchg64_local(ptr, o, n)					\
	((__typeof__(*(ptr)))__cmpxchg64_local((ptr), (unsigned long long)(o), \
					       (unsigned long long)(n)))
#define arch_try_cmpxchg64(ptr, po, n)					\
	__try_cmpxchg64((ptr), (unsigned long long *)(po),		\
			(unsigned long long)(n))
#endif
21bb898558SAl Viro
/*
 * Atomically compare *ptr against @old and, if equal, store @new.
 * Returns the prior contents of *ptr (equal to @old iff the exchange
 * happened).  SMP-safe: the CMPXCHG8B is LOCK-prefixed.  Callers must
 * have checked for CX8 support (see the note at the top of this file).
 */
static inline u64 __cmpxchg64(volatile u64 *ptr, u64 old, u64 new)
{
	u64 prev;
	/*
	 * cmpxchg8b compares EDX:EAX ("A" / "0" = @old) with *ptr; on a
	 * match it stores ECX:EBX (@new), otherwise it loads *ptr into
	 * EDX:EAX.  Either way 'prev' holds the old 64-bit contents.
	 */
	asm volatile(LOCK_PREFIX "cmpxchg8b %1"
		     : "=A" (prev),
		       "+m" (*ptr)
		     : "b" ((u32)new),
		       "c" ((u32)(new >> 32)),
		       "0" (old)
		     : "memory");
	return prev;
}
34bb898558SAl Viro
/*
 * Same operation as __cmpxchg64() but without the LOCK prefix, so the
 * exchange is NOT atomic with respect to other CPUs — only to the
 * local CPU (e.g. vs. interrupts).  Returns the prior contents of *ptr.
 */
static inline u64 __cmpxchg64_local(volatile u64 *ptr, u64 old, u64 new)
{
	u64 prev;
	asm volatile("cmpxchg8b %1"
		     : "=A" (prev),
		       "+m" (*ptr)
		     : "b" ((u32)new),
		       "c" ((u32)(new >> 32)),
		       "0" (old)
		     : "memory");
	return prev;
}
47bb898558SAl Viro
/*
 * try_cmpxchg-style 64-bit compare-and-exchange: atomically set *ptr to
 * @new if *ptr == *pold.  Returns true on success.  On failure, *pold
 * is updated with the value actually observed in *ptr, so callers can
 * retry in a loop without an extra load.
 */
static inline bool __try_cmpxchg64(volatile u64 *ptr, u64 *pold, u64 new)
{
	bool success;
	u64 old = *pold;
	/*
	 * The ZF flag is set by cmpxchg8b on success; CC_SET/CC_OUT
	 * extract it directly into 'success'.  "+A" makes EDX:EAX carry
	 * the expected value in and the observed value out.
	 */
	asm volatile(LOCK_PREFIX "cmpxchg8b %[ptr]"
		     CC_SET(z)
		     : CC_OUT(z) (success),
		       [ptr] "+m" (*ptr),
		       "+A" (old)
		     : "b" ((u32)new),
		       "c" ((u32)(new >> 32))
		     : "memory");

	if (unlikely(!success))
		*pold = old;
	return success;
}
65c2df0a6aSUros Bizjak
66bb898558SAl Viro #ifndef CONFIG_X86_CMPXCHG64
/*
 * Building a kernel capable of running on the 80386 and 80486.  On
 * those CPUs it may be necessary to emulate the cmpxchg8b instruction.
 */
71bb898558SAl Viro
/*
 * Runtime-patched arch_cmpxchg64(): alternative_io() emits a call to
 * the cmpxchg8b_emu software fallback, and the alternatives machinery
 * replaces it with a real "lock; cmpxchg8b" once X86_FEATURE_CX8 is
 * detected on the boot CPU.  Operands are pinned to the registers the
 * emulation routine expects (ESI = pointer, EDX:EAX = old, ECX:EBX = new).
 */
#define arch_cmpxchg64(ptr, o, n)				\
({								\
	__typeof__(*(ptr)) __ret;				\
	__typeof__(*(ptr)) __old = (o);				\
	__typeof__(*(ptr)) __new = (n);				\
	alternative_io(LOCK_PREFIX_HERE				\
			"call cmpxchg8b_emu",			\
			"lock; cmpxchg8b (%%esi)" ,		\
		       X86_FEATURE_CX8,				\
		       "=A" (__ret),				\
		       "S" ((ptr)), "0" (__old),		\
		       "b" ((unsigned int)__new),		\
		       "c" ((unsigned int)(__new>>32))		\
		       : "memory");				\
	__ret; })
8779e1dd05SArjan van de Ven
8879e1dd05SArjan van de Ven
/*
 * Runtime-patched arch_cmpxchg64_local(): like arch_cmpxchg64() above
 * but without the LOCK prefix, so it is only atomic with respect to
 * the local CPU.  alternative_io() patches the cmpxchg8b_emu call into
 * a bare "cmpxchg8b" when X86_FEATURE_CX8 is present.
 */
#define arch_cmpxchg64_local(ptr, o, n)				\
({								\
	__typeof__(*(ptr)) __ret;				\
	__typeof__(*(ptr)) __old = (o);				\
	__typeof__(*(ptr)) __new = (n);				\
	alternative_io("call cmpxchg8b_emu",			\
		       "cmpxchg8b (%%esi)" ,			\
		       X86_FEATURE_CX8,				\
		       "=A" (__ret),				\
		       "S" ((ptr)), "0" (__old),		\
		       "b" ((unsigned int)__new),		\
		       "c" ((unsigned int)(__new>>32))		\
		       : "memory");				\
	__ret; })
103bb898558SAl Viro
104bb898558SAl Viro #endif
105bb898558SAl Viro
/* True when the boot CPU supports CMPXCHG8B (the CX8 feature bit). */
#define system_has_cmpxchg64() boot_cpu_has(X86_FEATURE_CX8)
1073824abd1SChristoph Lameter
1081965aae3SH. Peter Anvin #endif /* _ASM_X86_CMPXCHG_32_H */
109