/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Copyright (C) 2012 Regents of the University of California
 * Copyright (C) 2017 SiFive
 */

#ifndef _ASM_RISCV_ATOMIC_H
#define _ASM_RISCV_ATOMIC_H

#ifdef CONFIG_GENERIC_ATOMIC64
# include <asm-generic/atomic64.h>
#else
# if (__riscv_xlen < 64)
#  error "64-bit atomics require XLEN to be at least 64"
# endif
#endif

#include <asm/cmpxchg.h>
#include <asm/barrier.h>

#define __atomic_acquire_fence()                                        \
        __asm__ __volatile__(RISCV_ACQUIRE_BARRIER "" ::: "memory")

#define __atomic_release_fence()                                        \
        __asm__ __volatile__(RISCV_RELEASE_BARRIER "" ::: "memory")

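/*
 * Note: the two fence hooks above are picked up by the generic fallback
 * layer behind <linux/atomic.h>, which synthesizes the _acquire/_release
 * variants from the _relaxed ops defined below.  Roughly (a sketch of the
 * generic fallback, not code from this file):
 *
 *      static __always_inline int
 *      arch_atomic_add_return_acquire(int i, atomic_t *v)
 *      {
 *              int ret = arch_atomic_add_return_relaxed(i, v);
 *              __atomic_acquire_fence();
 *              return ret;
 *      }
 *
 * RISCV_ACQUIRE_BARRIER and RISCV_RELEASE_BARRIER are defined in
 * <asm/fence.h>.
 */
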
static __always_inline int arch_atomic_read(const atomic_t *v)
{
        return READ_ONCE(v->counter);
}

static __always_inline void arch_atomic_set(atomic_t *v, int i)
{
        WRITE_ONCE(v->counter, i);
}

#ifndef CONFIG_GENERIC_ATOMIC64
#define ATOMIC64_INIT(i) { (i) }

static __always_inline s64 arch_atomic64_read(const atomic64_t *v)
{
        return READ_ONCE(v->counter);
}

static __always_inline void arch_atomic64_set(atomic64_t *v, s64 i)
{
        WRITE_ONCE(v->counter, i);
}
#endif

/*
 * First, the atomic ops that have no ordering constraints and therefore don't
 * have the AQ or RL bits set.  These don't return anything, so there's only
 * one version to worry about.
 */
#define ATOMIC_OP(op, asm_op, I, asm_type, c_type, prefix)              \
static __always_inline                                                  \
void arch_atomic##prefix##_##op(c_type i, atomic##prefix##_t *v)        \
{                                                                       \
        __asm__ __volatile__ (                                          \
                "       amo" #asm_op "." #asm_type " zero, %1, %0"      \
                : "+A" (v->counter)                                     \
                : "r" (I)                                               \
                : "memory");                                            \
}

#ifdef CONFIG_GENERIC_ATOMIC64
#define ATOMIC_OPS(op, asm_op, I)                                       \
        ATOMIC_OP (op, asm_op, I, w, int,   )
#else
#define ATOMIC_OPS(op, asm_op, I)                                       \
        ATOMIC_OP (op, asm_op, I, w, int,   )                           \
        ATOMIC_OP (op, asm_op, I, d, s64, 64)
#endif

ATOMIC_OPS(add, add,  i)
ATOMIC_OPS(sub, add, -i)
ATOMIC_OPS(and, and,  i)
ATOMIC_OPS( or,  or,  i)
ATOMIC_OPS(xor, xor,  i)
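
/*
 * For instance, ATOMIC_OPS(add, add, i) above expands (for the 32-bit case)
 * to roughly the function below.  Writing the AMO result to the zero
 * register discards the old value, which is what makes this the void,
 * non-returning form:
 *
 *      static __always_inline void arch_atomic_add(int i, atomic_t *v)
 *      {
 *              __asm__ __volatile__ (
 *                      "       amoadd.w zero, %1, %0"
 *                      : "+A" (v->counter)
 *                      : "r" (i)
 *                      : "memory");
 *      }
 */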

#undef ATOMIC_OP
#undef ATOMIC_OPS

/*
 * Atomic ops that have ordered, relaxed, acquire, and release variants.
 * There are two flavors of these: the arithmetic ops have both fetch and
 * return versions, while the logical ops only have fetch versions.
 */
#define ATOMIC_FETCH_OP(op, asm_op, I, asm_type, c_type, prefix)        \
static __always_inline                                                  \
c_type arch_atomic##prefix##_fetch_##op##_relaxed(c_type i,             \
                                             atomic##prefix##_t *v)     \
{                                                                       \
        register c_type ret;                                            \
        __asm__ __volatile__ (                                          \
                "       amo" #asm_op "." #asm_type " %1, %2, %0"        \
                : "+A" (v->counter), "=r" (ret)                         \
                : "r" (I)                                               \
                : "memory");                                            \
        return ret;                                                     \
}                                                                       \
static __always_inline                                                  \
c_type arch_atomic##prefix##_fetch_##op(c_type i, atomic##prefix##_t *v) \
{                                                                       \
        register c_type ret;                                            \
        __asm__ __volatile__ (                                          \
                "       amo" #asm_op "." #asm_type ".aqrl  %1, %2, %0"  \
                : "+A" (v->counter), "=r" (ret)                         \
                : "r" (I)                                               \
                : "memory");                                            \
        return ret;                                                     \
}

#define ATOMIC_OP_RETURN(op, asm_op, c_op, I, asm_type, c_type, prefix) \
static __always_inline                                                  \
c_type arch_atomic##prefix##_##op##_return_relaxed(c_type i,            \
                                              atomic##prefix##_t *v)    \
{                                                                       \
        return arch_atomic##prefix##_fetch_##op##_relaxed(i, v) c_op I; \
}                                                                       \
static __always_inline                                                  \
c_type arch_atomic##prefix##_##op##_return(c_type i, atomic##prefix##_t *v) \
{                                                                       \
        return arch_atomic##prefix##_fetch_##op(i, v) c_op I;           \
}
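
/*
 * For example, ATOMIC_OPS(add, add, +, i) below generates, roughly:
 *
 *      arch_atomic_fetch_add_relaxed(i, v)  ->  "amoadd.w      %1, %2, %0"
 *      arch_atomic_fetch_add(i, v)          ->  "amoadd.w.aqrl %1, %2, %0"
 *      arch_atomic_add_return{,_relaxed}(i, v)
 *              ->  arch_atomic_fetch_add{,_relaxed}(i, v) + i
 *
 * i.e. the relaxed AMO carries no ordering suffix, while the fully ordered
 * one uses .aqrl.  Note that sub is implemented as an add of the negated
 * operand, which is why ATOMIC_OPS(sub, add, +, -i) still passes "add"
 * and "+".
 */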

#ifdef CONFIG_GENERIC_ATOMIC64
#define ATOMIC_OPS(op, asm_op, c_op, I)                                 \
        ATOMIC_FETCH_OP( op, asm_op,       I, w, int,   )               \
        ATOMIC_OP_RETURN(op, asm_op, c_op, I, w, int,   )
#else
#define ATOMIC_OPS(op, asm_op, c_op, I)                                 \
        ATOMIC_FETCH_OP( op, asm_op,       I, w, int,   )               \
        ATOMIC_OP_RETURN(op, asm_op, c_op, I, w, int,   )               \
        ATOMIC_FETCH_OP( op, asm_op,       I, d, s64, 64)               \
        ATOMIC_OP_RETURN(op, asm_op, c_op, I, d, s64, 64)
#endif

ATOMIC_OPS(add, add, +,  i)
ATOMIC_OPS(sub, add, +, -i)

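/*
 * The self-referential #defines below advertise to <linux/atomic.h> that
 * these ops are provided by the architecture, so the generic layer only
 * fills in the missing _acquire/_release forms (via the fence hooks at the
 * top of this file) instead of redefining the ops themselves.
 */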
#define arch_atomic_add_return_relaxed  arch_atomic_add_return_relaxed
#define arch_atomic_sub_return_relaxed  arch_atomic_sub_return_relaxed
#define arch_atomic_add_return          arch_atomic_add_return
#define arch_atomic_sub_return          arch_atomic_sub_return

#define arch_atomic_fetch_add_relaxed   arch_atomic_fetch_add_relaxed
#define arch_atomic_fetch_sub_relaxed   arch_atomic_fetch_sub_relaxed
#define arch_atomic_fetch_add           arch_atomic_fetch_add
#define arch_atomic_fetch_sub           arch_atomic_fetch_sub

#ifndef CONFIG_GENERIC_ATOMIC64
#define arch_atomic64_add_return_relaxed        arch_atomic64_add_return_relaxed
#define arch_atomic64_sub_return_relaxed        arch_atomic64_sub_return_relaxed
#define arch_atomic64_add_return                arch_atomic64_add_return
#define arch_atomic64_sub_return                arch_atomic64_sub_return

#define arch_atomic64_fetch_add_relaxed arch_atomic64_fetch_add_relaxed
#define arch_atomic64_fetch_sub_relaxed arch_atomic64_fetch_sub_relaxed
#define arch_atomic64_fetch_add         arch_atomic64_fetch_add
#define arch_atomic64_fetch_sub         arch_atomic64_fetch_sub
#endif

#undef ATOMIC_OPS

#ifdef CONFIG_GENERIC_ATOMIC64
#define ATOMIC_OPS(op, asm_op, I)                                       \
        ATOMIC_FETCH_OP(op, asm_op, I, w, int,   )
#else
#define ATOMIC_OPS(op, asm_op, I)                                       \
        ATOMIC_FETCH_OP(op, asm_op, I, w, int,   )                      \
        ATOMIC_FETCH_OP(op, asm_op, I, d, s64, 64)
#endif

ATOMIC_OPS(and, and, i)
ATOMIC_OPS( or,  or, i)
ATOMIC_OPS(xor, xor, i)

#define arch_atomic_fetch_and_relaxed   arch_atomic_fetch_and_relaxed
#define arch_atomic_fetch_or_relaxed    arch_atomic_fetch_or_relaxed
#define arch_atomic_fetch_xor_relaxed   arch_atomic_fetch_xor_relaxed
#define arch_atomic_fetch_and           arch_atomic_fetch_and
#define arch_atomic_fetch_or            arch_atomic_fetch_or
#define arch_atomic_fetch_xor           arch_atomic_fetch_xor

#ifndef CONFIG_GENERIC_ATOMIC64
#define arch_atomic64_fetch_and_relaxed arch_atomic64_fetch_and_relaxed
#define arch_atomic64_fetch_or_relaxed  arch_atomic64_fetch_or_relaxed
#define arch_atomic64_fetch_xor_relaxed arch_atomic64_fetch_xor_relaxed
#define arch_atomic64_fetch_and         arch_atomic64_fetch_and
#define arch_atomic64_fetch_or          arch_atomic64_fetch_or
#define arch_atomic64_fetch_xor         arch_atomic64_fetch_xor
#endif

#undef ATOMIC_OPS

#undef ATOMIC_FETCH_OP
#undef ATOMIC_OP_RETURN

/* This is required to provide a full barrier on success. */
static __always_inline int arch_atomic_fetch_add_unless(atomic_t *v, int a, int u)
{
        int prev, rc;

        __asm__ __volatile__ (
                "0:     lr.w     %[p],  %[c]\n"
                "       beq      %[p],  %[u], 1f\n"
                "       add      %[rc], %[p], %[a]\n"
                "       sc.w.rl  %[rc], %[rc], %[c]\n"
                "       bnez     %[rc], 0b\n"
                "       fence    rw, rw\n"
                "1:\n"
                : [p]"=&r" (prev), [rc]"=&r" (rc), [c]"+A" (v->counter)
                : [a]"r" (a), [u]"r" (u)
                : "memory");
        return prev;
}
#define arch_atomic_fetch_add_unless arch_atomic_fetch_add_unless
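
/*
 * In the LR/SC loop above, sc.w.rl gives the store release ordering and the
 * trailing "fence rw, rw" upgrades the successful path to a full barrier;
 * the failure path (branch to 1:) deliberately skips the fence, since full
 * ordering is only required on success.  The inc/dec helpers below use the
 * same pattern.  A typical (hypothetical) caller:
 *
 *      if (atomic_fetch_add_unless(&obj->refs, 1, 0) == 0)
 *              return -ENOENT;         // object already dead, take no ref
 */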

#ifndef CONFIG_GENERIC_ATOMIC64
static __always_inline s64 arch_atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u)
{
        s64 prev;
        long rc;

        __asm__ __volatile__ (
                "0:     lr.d     %[p],  %[c]\n"
                "       beq      %[p],  %[u], 1f\n"
                "       add      %[rc], %[p], %[a]\n"
                "       sc.d.rl  %[rc], %[rc], %[c]\n"
                "       bnez     %[rc], 0b\n"
                "       fence    rw, rw\n"
                "1:\n"
                : [p]"=&r" (prev), [rc]"=&r" (rc), [c]"+A" (v->counter)
                : [a]"r" (a), [u]"r" (u)
                : "memory");
        return prev;
}
#define arch_atomic64_fetch_add_unless arch_atomic64_fetch_add_unless
#endif

static __always_inline bool arch_atomic_inc_unless_negative(atomic_t *v)
{
        int prev, rc;

        __asm__ __volatile__ (
                "0:     lr.w      %[p],  %[c]\n"
                "       bltz      %[p],  1f\n"
                "       addi      %[rc], %[p], 1\n"
                "       sc.w.rl   %[rc], %[rc], %[c]\n"
                "       bnez      %[rc], 0b\n"
                "       fence     rw, rw\n"
                "1:\n"
                : [p]"=&r" (prev), [rc]"=&r" (rc), [c]"+A" (v->counter)
                :
                : "memory");
        return !(prev < 0);
}

#define arch_atomic_inc_unless_negative arch_atomic_inc_unless_negative

static __always_inline bool arch_atomic_dec_unless_positive(atomic_t *v)
{
        int prev, rc;

        __asm__ __volatile__ (
                "0:     lr.w      %[p],  %[c]\n"
                "       bgtz      %[p],  1f\n"
                "       addi      %[rc], %[p], -1\n"
                "       sc.w.rl   %[rc], %[rc], %[c]\n"
                "       bnez      %[rc], 0b\n"
                "       fence     rw, rw\n"
                "1:\n"
                : [p]"=&r" (prev), [rc]"=&r" (rc), [c]"+A" (v->counter)
                :
                : "memory");
        return !(prev > 0);
}

#define arch_atomic_dec_unless_positive arch_atomic_dec_unless_positive

static __always_inline int arch_atomic_dec_if_positive(atomic_t *v)
{
        int prev, rc;

        __asm__ __volatile__ (
                "0:     lr.w      %[p],  %[c]\n"
                "       addi      %[rc], %[p], -1\n"
                "       bltz      %[rc], 1f\n"
                "       sc.w.rl   %[rc], %[rc], %[c]\n"
                "       bnez      %[rc], 0b\n"
                "       fence     rw, rw\n"
                "1:\n"
                : [p]"=&r" (prev), [rc]"=&r" (rc), [c]"+A" (v->counter)
                :
                : "memory");
        return prev - 1;
}

#define arch_atomic_dec_if_positive arch_atomic_dec_if_positive
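
/*
 * Note that arch_atomic_dec_if_positive() returns the decremented value,
 * prev - 1.  When the branch to 1: is taken the counter was <= 0 and is
 * left untouched, so a negative return value tells the caller that no
 * decrement happened.  A typical (hypothetical) caller:
 *
 *      if (atomic_dec_if_positive(&pool->available) < 0)
 *              wait_for_slot(pool);    // counter was already 0 (or below)
 */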

#ifndef CONFIG_GENERIC_ATOMIC64
static __always_inline bool arch_atomic64_inc_unless_negative(atomic64_t *v)
{
        s64 prev;
        long rc;

        __asm__ __volatile__ (
                "0:     lr.d      %[p],  %[c]\n"
                "       bltz      %[p],  1f\n"
                "       addi      %[rc], %[p], 1\n"
                "       sc.d.rl   %[rc], %[rc], %[c]\n"
                "       bnez      %[rc], 0b\n"
                "       fence     rw, rw\n"
                "1:\n"
                : [p]"=&r" (prev), [rc]"=&r" (rc), [c]"+A" (v->counter)
                :
                : "memory");
        return !(prev < 0);
}

#define arch_atomic64_inc_unless_negative arch_atomic64_inc_unless_negative

static __always_inline bool arch_atomic64_dec_unless_positive(atomic64_t *v)
{
        s64 prev;
        long rc;

        __asm__ __volatile__ (
                "0:     lr.d      %[p],  %[c]\n"
                "       bgtz      %[p],  1f\n"
                "       addi      %[rc], %[p], -1\n"
                "       sc.d.rl   %[rc], %[rc], %[c]\n"
                "       bnez      %[rc], 0b\n"
                "       fence     rw, rw\n"
                "1:\n"
                : [p]"=&r" (prev), [rc]"=&r" (rc), [c]"+A" (v->counter)
                :
                : "memory");
        return !(prev > 0);
}

#define arch_atomic64_dec_unless_positive arch_atomic64_dec_unless_positive

static __always_inline s64 arch_atomic64_dec_if_positive(atomic64_t *v)
{
        s64 prev;
        long rc;

        __asm__ __volatile__ (
                "0:     lr.d      %[p],  %[c]\n"
                "       addi      %[rc], %[p], -1\n"
                "       bltz      %[rc], 1f\n"
                "       sc.d.rl   %[rc], %[rc], %[c]\n"
                "       bnez      %[rc], 0b\n"
                "       fence     rw, rw\n"
                "1:\n"
                : [p]"=&r" (prev), [rc]"=&r" (rc), [c]"+A" (v->counter)
                :
                : "memory");
        return prev - 1;
}

#define arch_atomic64_dec_if_positive arch_atomic64_dec_if_positive
#endif

#endif /* _ASM_RISCV_ATOMIC_H */