/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright IBM Corp. 1999, 2016
 * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>,
 *	      Denis Joseph Barrow,
 *	      Arnd Bergmann,
 */

#ifndef __ARCH_S390_ATOMIC__
#define __ARCH_S390_ATOMIC__

#include <linux/compiler.h>
#include <linux/types.h>
#include <asm/atomic_ops.h>
#include <asm/barrier.h>
#include <asm/cmpxchg.h>

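/*
 * atomic_read() and atomic_set() compile to single 32-bit load ("l")
 * and store ("st") instructions, which are atomic on s390 for
 * naturally aligned data; the volatile asm keeps the compiler from
 * caching the value or eliding the access.
 */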
static inline int atomic_read(const atomic_t *v)
{
	int c;

	asm volatile(
		"	l	%0,%1\n"
		: "=d" (c) : "Q" (v->counter));
	return c;
}

static inline void atomic_set(atomic_t *v, int i)
{
	asm volatile(
		"	st	%1,%0\n"
		: "=Q" (v->counter) : "d" (i));
}

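/*
 * __atomic_add_barrier() adds i to the counter and returns the old
 * value with full memory-barrier semantics; atomic_add_return()
 * derives the new value by adding i again, while atomic_fetch_add()
 * hands back the old value directly. The plain atomic_add() below
 * neither returns a value nor implies a barrier.
 */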
static inline int atomic_add_return(int i, atomic_t *v)
{
	return __atomic_add_barrier(i, &v->counter) + i;
}

static inline int atomic_fetch_add(int i, atomic_t *v)
{
	return __atomic_add_barrier(i, &v->counter);
}

static inline void atomic_add(int i, atomic_t *v)
{
	__atomic_add(i, &v->counter);
}

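/* The sub flavors are addition of the negated, int-cast value. */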
#define atomic_sub(_i, _v)		atomic_add(-(int)(_i), _v)
#define atomic_sub_return(_i, _v)	atomic_add_return(-(int)(_i), _v)
#define atomic_fetch_sub(_i, _v)	atomic_fetch_add(-(int)(_i), _v)

#define ATOMIC_OPS(op)							\
static inline void atomic_##op(int i, atomic_t *v)			\
{									\
	__atomic_##op(i, &v->counter);					\
}									\
static inline int atomic_fetch_##op(int i, atomic_t *v)			\
{									\
	return __atomic_##op##_barrier(i, &v->counter);			\
}

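/*
 * Each ATOMIC_OPS(op) invocation below generates two helpers for a
 * bitwise operation: a void atomic_##op() without ordering guarantees
 * and an atomic_fetch_##op() that returns the old value and implies a
 * full barrier. ATOMIC_OPS(and), for example, expands to atomic_and()
 * and atomic_fetch_and().
 */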
ATOMIC_OPS(and)
ATOMIC_OPS(or)
ATOMIC_OPS(xor)

#undef ATOMIC_OPS

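/* xchg() unconditionally installs the new value and returns the previous one. */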
#define atomic_xchg(v, new) (xchg(&((v)->counter), new))

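/*
 * atomic_cmpxchg() stores new only if the counter still equals old,
 * and always returns the value it found; comparing the result against
 * old tells the caller whether the swap happened. A minimal sketch
 * (not part of this header, helper name hypothetical) of the classic
 * retry loop built on it, incrementing the counter only while it is
 * non-zero:
 *
 *	static inline int atomic_inc_not_zero_sketch(atomic_t *v)
 *	{
 *		int old = atomic_read(v);
 *
 *		while (old) {
 *			int prev = atomic_cmpxchg(v, old, old + 1);
 *
 *			if (prev == old)
 *				return 1;
 *			old = prev;
 *		}
 *		return 0;
 *	}
 */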
static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
{
	return __atomic_cmpxchg(&v->counter, old, new);
}

#define ATOMIC64_INIT(i)  { (i) }

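/*
 * The 64-bit variants mirror the 32-bit ones, using the 64-bit load
 * ("lg") and store ("stg") instructions.
 */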
static inline s64 atomic64_read(const atomic64_t *v)
{
	s64 c;

	asm volatile(
		"	lg	%0,%1\n"
		: "=d" (c) : "Q" (v->counter));
	return c;
}

static inline void atomic64_set(atomic64_t *v, s64 i)
{
	asm volatile(
		"	stg	%1,%0\n"
		: "=Q" (v->counter) : "d" (i));
}

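/*
 * The __atomic64_*() primitives operate on long, which is 64 bits
 * wide on s390, so casting the s64 counter pointer to (long *) is
 * safe here.
 */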
static inline s64 atomic64_add_return(s64 i, atomic64_t *v)
{
	return __atomic64_add_barrier(i, (long *)&v->counter) + i;
}

static inline s64 atomic64_fetch_add(s64 i, atomic64_t *v)
{
	return __atomic64_add_barrier(i, (long *)&v->counter);
}

static inline void atomic64_add(s64 i, atomic64_t *v)
{
	__atomic64_add(i, (long *)&v->counter);
}

#define atomic64_xchg(v, new) (xchg(&((v)->counter), new))

static inline s64 atomic64_cmpxchg(atomic64_t *v, s64 old, s64 new)
{
	return __atomic64_cmpxchg((long *)&v->counter, old, new);
}

#define ATOMIC64_OPS(op)						\
static inline void atomic64_##op(s64 i, atomic64_t *v)			\
{									\
	__atomic64_##op(i, (long *)&v->counter);			\
}									\
static inline s64 atomic64_fetch_##op(s64 i, atomic64_t *v)		\
{									\
	return __atomic64_##op##_barrier(i, (long *)&v->counter);	\
}

ATOMIC64_OPS(and)
ATOMIC64_OPS(or)
ATOMIC64_OPS(xor)

#undef ATOMIC64_OPS

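/* As in the 32-bit case, the sub flavors are addition of the negated value. */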
#define atomic64_sub_return(_i, _v)	atomic64_add_return(-(s64)(_i), _v)
#define atomic64_fetch_sub(_i, _v)	atomic64_fetch_add(-(s64)(_i), _v)
#define atomic64_sub(_i, _v)		atomic64_add(-(s64)(_i), _v)
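
/*
 * Usage sketch (not part of this header; names are hypothetical):
 * a simple transmit byte counter.
 *
 *	static atomic64_t bytes_sent = ATOMIC64_INIT(0);
 *
 *	static void account_tx(unsigned int len)
 *	{
 *		atomic64_add(len, &bytes_sent);
 *	}
 *
 *	static s64 read_tx(void)
 *	{
 *		return atomic64_read(&bytes_sent);
 *	}
 */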

#endif /* __ARCH_S390_ATOMIC__ */