/* SPDX-License-Identifier: GPL-2.0-only */

#ifndef _ASM_ARC_ATOMIC_SPLOCK_H
#define _ASM_ARC_ATOMIC_SPLOCK_H

/*
 * Non-hardware-assisted atomic R-M-W:
 * each atomic op is emulated as a plain read-modify-write sequence
 * under a lock. The lock is irq-disabling alone on UP, and a spinlock
 * (plus irq-disabling) on SMP.
 */

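/*
 * For context, a rough sketch of the scheme atomic_ops_lock()/unlock()
 * implement (the real definitions live outside this header, e.g. in
 * the arch's smp.h; the lock name below is illustrative, not
 * authoritative):
 *
 *   #ifdef CONFIG_SMP
 *   // one global lock serializing all emulated atomics
 *   #define atomic_ops_lock(flags)   do { local_irq_save(flags); \
 *                                         arch_spin_lock(&smp_atomic_ops_lock); } while (0)
 *   #define atomic_ops_unlock(flags) do { arch_spin_unlock(&smp_atomic_ops_lock); \
 *                                         local_irq_restore(flags); } while (0)
 *   #else
 *   #define atomic_ops_lock(flags)   local_irq_save(flags)
 *   #define atomic_ops_unlock(flags) local_irq_restore(flags)
 *   #endif
 */
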
static inline void arch_atomic_set(atomic_t *v, int i)
{
	/*
	 * Independent of hardware support, all of the atomic_xxx() APIs need
	 * to follow the same locking rules to make sure that a "hardware"
	 * atomic insn (e.g. LD) doesn't clobber an "emulated" atomic insn
	 * sequence.
	 *
	 * Thus atomic_set(), despite being a single insn (and seemingly
	 * atomic), still requires the locking.
	 */
	unsigned long flags;

	atomic_ops_lock(flags);
	WRITE_ONCE(v->counter, i);
	atomic_ops_unlock(flags);
}

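/*
 * For illustration, a hypothetical interleaving showing why the plain
 * store above needs the lock (without it, CPU1's store could be lost
 * inside CPU0's emulated R-M-W):
 *
 *   CPU0: arch_atomic_add(1, v)         CPU1: arch_atomic_set(v, 5)
 *   ---------------------------         ---------------------------
 *   lock; tmp = v->counter;
 *                                       v->counter = 5;
 *   v->counter = tmp + 1; unlock;       // the set is silently undone
 *
 * With the lock taken in arch_atomic_set() too, CPU1 waits until the
 * whole R-M-W sequence has completed.
 */
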
#define arch_atomic_set_release(v, i)	arch_atomic_set((v), (i))

#define ATOMIC_OP(op, c_op, asm_op)					\
static inline void arch_atomic_##op(int i, atomic_t *v)		\
{									\
	unsigned long flags;						\
									\
	atomic_ops_lock(flags);						\
	v->counter c_op i;						\
	atomic_ops_unlock(flags);					\
}

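/*
 * For illustration, ATOMIC_OP(add, +=, add) expands to roughly:
 *
 *   static inline void arch_atomic_add(int i, atomic_t *v)
 *   {
 *           unsigned long flags;
 *
 *           atomic_ops_lock(flags);
 *           v->counter += i;
 *           atomic_ops_unlock(flags);
 *   }
 *
 * Note that asm_op is unused in this spinlock-based file; it is kept
 * in the macro signature so the ATOMIC_OPS() lists mirror the
 * hardware-assisted (LLSC) variant, where it names the real insn.
 */
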
#define ATOMIC_OP_RETURN(op, c_op, asm_op)				\
static inline int arch_atomic_##op##_return(int i, atomic_t *v)	\
{									\
	unsigned long flags;						\
	unsigned int temp;						\
									\
	/*								\
	 * spin lock/unlock provides the needed smp_mb() before/after	\
	 */								\
	atomic_ops_lock(flags);						\
	temp = v->counter;						\
	temp c_op i;							\
	v->counter = temp;						\
	atomic_ops_unlock(flags);					\
									\
	return temp;							\
}

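/*
 * For illustration, ATOMIC_OP_RETURN(add, +=, add) generates
 *
 *   static inline int arch_atomic_add_return(int i, atomic_t *v);
 *
 * which applies "counter += i" under the lock and returns the NEW
 * value of the counter.
 */
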
#define ATOMIC_FETCH_OP(op, c_op, asm_op)				\
static inline int arch_atomic_fetch_##op(int i, atomic_t *v)		\
{									\
	unsigned long flags;						\
	unsigned int orig;						\
									\
	/*								\
	 * spin lock/unlock provides the needed smp_mb() before/after	\
	 */								\
	atomic_ops_lock(flags);						\
	orig = v->counter;						\
	v->counter c_op i;						\
	atomic_ops_unlock(flags);					\
									\
	return orig;							\
}

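/*
 * The fetch_ variant returns the ORIGINAL value, the _return variant
 * the NEW one. A worked example, starting from v->counter == 4:
 *
 *   arch_atomic_add_return(3, v)  -> 7    (v->counter is now 7)
 *   arch_atomic_fetch_add(3, v)   -> 7    (v->counter is now 10)
 */
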
#define ATOMIC_OPS(op, c_op, asm_op)					\
	ATOMIC_OP(op, c_op, asm_op)					\
	ATOMIC_OP_RETURN(op, c_op, asm_op)				\
	ATOMIC_FETCH_OP(op, c_op, asm_op)

ATOMIC_OPS(add, +=, add)
ATOMIC_OPS(sub, -=, sub)

/* tell the generic code that the arch provides these ops */
#define arch_atomic_fetch_add		arch_atomic_fetch_add
#define arch_atomic_fetch_sub		arch_atomic_fetch_sub
#define arch_atomic_add_return		arch_atomic_add_return
#define arch_atomic_sub_return		arch_atomic_sub_return

#undef ATOMIC_OPS

/* the bitwise ops only need the plain and fetch_ forms, no _return */
#define ATOMIC_OPS(op, c_op, asm_op)					\
	ATOMIC_OP(op, c_op, asm_op)					\
	ATOMIC_FETCH_OP(op, c_op, asm_op)

ATOMIC_OPS(and, &=, and)
ATOMIC_OPS(andnot, &= ~, bic)
ATOMIC_OPS(or, |=, or)
ATOMIC_OPS(xor, ^=, xor)

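/*
 * Note the "&= ~" c_op: the andnot expansion becomes
 *
 *   v->counter &= ~ i;
 *
 * i.e. arch_atomic_andnot(i, v) clears in v the bits that are set in
 * i, mirroring the "bic" (bit-clear) insn named by the unused asm_op.
 */
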
#define arch_atomic_andnot		arch_atomic_andnot

#define arch_atomic_fetch_and		arch_atomic_fetch_and
#define arch_atomic_fetch_andnot	arch_atomic_fetch_andnot
#define arch_atomic_fetch_or		arch_atomic_fetch_or
#define arch_atomic_fetch_xor		arch_atomic_fetch_xor

#undef ATOMIC_OPS
#undef ATOMIC_FETCH_OP
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP

#endif